1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2016 Benoit Steiner (benoit.steiner.goog@gmail.com)
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_PACKET_MATH_AVX512_H
11 #define EIGEN_PACKET_MATH_AVX512_H
12 
13 namespace Eigen {
14 
15 namespace internal {
16 
17 #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
18 #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
19 #endif
20 
21 #ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
22 #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
23 #endif
24 
25 #ifdef EIGEN_VECTORIZE_FMA
26 #ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
27 #define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
28 #endif
29 #endif
30 
31 typedef __m512 Packet16f;
32 typedef __m512i Packet16i;
33 typedef __m512d Packet8d;
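// Packet16h and Packet16bf store 16 half / bfloat16 values in a __m256i register;
// the integer tag of eigen_packet_wrapper only serves to make them distinct C++
// types so the two formats can be dispatched separately.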
34 typedef eigen_packet_wrapper<__m256i, 1> Packet16h;
35 typedef eigen_packet_wrapper<__m256i, 2> Packet16bf;
36 
37 template <>
38 struct is_arithmetic<__m512> {
39   enum { value = true };
40 };
41 template <>
42 struct is_arithmetic<__m512i> {
43   enum { value = true };
44 };
45 template <>
46 struct is_arithmetic<__m512d> {
47   enum { value = true };
48 };
49 
50 template<> struct is_arithmetic<Packet16h> { enum { value = true }; };
51 
52 template <>
53 struct packet_traits<half> : default_packet_traits {
54   typedef Packet16h type;
55   // There is no half-size packet for Packet16h.
56   typedef Packet16h half;
57   enum {
58     Vectorizable = 1,
59     AlignedOnScalar = 1,
60     size = 16,
61     HasHalfPacket = 1,
62 
63     HasCmp    = 1,
64     HasAdd    = 1,
65     HasSub    = 1,
66     HasMul    = 1,
67     HasDiv    = 1,
68     HasNegate = 1,
69     HasAbs    = 1,
70     HasAbs2   = 0,
71     HasMin    = 1,
72     HasMax    = 1,
73     HasConj   = 1,
74     HasSetLinear = 0,
75     HasLog    = 1,
76     HasLog1p  = 1,
77     HasExpm1  = 1,
78     HasExp    = 1,
79     HasSqrt   = 1,
80     HasRsqrt  = 1,
81     HasSin    = EIGEN_FAST_MATH,
82     HasCos    = EIGEN_FAST_MATH,
83     HasTanh   = EIGEN_FAST_MATH,
84     HasErf    = EIGEN_FAST_MATH,
85     HasBlend = 0,
86     HasRound  = 1,
87     HasFloor  = 1,
88     HasCeil   = 1,
89     HasRint   = 1,
90     HasBessel = 1,
91     HasNdtri  = 1
92   };
93 };
94 
95 template<> struct packet_traits<float>  : default_packet_traits
96 {
97   typedef Packet16f type;
98   typedef Packet8f half;
99   enum {
100     Vectorizable = 1,
101     AlignedOnScalar = 1,
102     size = 16,
103     HasHalfPacket = 1,
104 
105     HasAbs = 1,
106     HasMin    = 1,
107     HasMax    = 1,
108     HasConj   = 1,
109     HasBlend = 0,
110     HasSin = EIGEN_FAST_MATH,
111     HasCos = EIGEN_FAST_MATH,
112 #if EIGEN_GNUC_AT_LEAST(5, 3) || (!EIGEN_COMP_GNUC_STRICT)
113     HasLog = 1,
114     HasLog1p  = 1,
115     HasExpm1  = 1,
116     HasNdtri = 1,
117     HasBessel  = 1,
118     HasExp = 1,
119     HasSqrt = EIGEN_FAST_MATH,
120     HasRsqrt = EIGEN_FAST_MATH,
121     HasTanh = EIGEN_FAST_MATH,
122     HasErf = EIGEN_FAST_MATH,
123 #endif
124     HasCmp  = 1,
125     HasDiv = 1,
126     HasRound = 1,
127     HasFloor = 1,
128     HasCeil = 1,
129     HasRint = 1
130   };
131  };
132 template<> struct packet_traits<double> : default_packet_traits
133 {
134   typedef Packet8d type;
135   typedef Packet4d half;
136   enum {
137     Vectorizable = 1,
138     AlignedOnScalar = 1,
139     size = 8,
140     HasHalfPacket = 1,
141 #if EIGEN_GNUC_AT_LEAST(5, 3) || (!EIGEN_COMP_GNUC_STRICT)
142     HasLog  = 1,
143     HasExp = 1,
144     HasSqrt = EIGEN_FAST_MATH,
145     HasRsqrt = EIGEN_FAST_MATH,
146 #endif
147     HasCmp  = 1,
148     HasDiv = 1,
149     HasRound = 1,
150     HasFloor = 1,
151     HasCeil = 1,
152     HasRint = 1
153   };
154 };
155 
156 /* TODO Implement AVX512 for integers
157 template<> struct packet_traits<int>    : default_packet_traits
158 {
159   typedef Packet16i type;
160   enum {
161     Vectorizable = 1,
162     AlignedOnScalar = 1,
163     size=8
164   };
165 };
166 */
167 
168 template <>
169 struct unpacket_traits<Packet16f> {
170   typedef float type;
171   typedef Packet8f half;
172   typedef Packet16i integer_packet;
173   typedef uint16_t mask_t;
174   enum { size = 16, alignment=Aligned64, vectorizable=true, masked_load_available=true, masked_store_available=true };
175 };
176 template <>
177 struct unpacket_traits<Packet8d> {
178   typedef double type;
179   typedef Packet4d half;
180   enum { size = 8, alignment=Aligned64, vectorizable=true, masked_load_available=false, masked_store_available=false };
181 };
182 template <>
183 struct unpacket_traits<Packet16i> {
184   typedef int type;
185   typedef Packet8i half;
186   enum { size = 16, alignment=Aligned64, vectorizable=false, masked_load_available=false, masked_store_available=false };
187 };
188 
189 template<>
190 struct unpacket_traits<Packet16h> {
191   typedef Eigen::half type;
192   typedef Packet8h half;
193   enum {size=16, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
194 };
195 
196 template <>
197 EIGEN_STRONG_INLINE Packet16f pset1<Packet16f>(const float& from) {
198   return _mm512_set1_ps(from);
199 }
200 template <>
201 EIGEN_STRONG_INLINE Packet8d pset1<Packet8d>(const double& from) {
202   return _mm512_set1_pd(from);
203 }
204 template <>
205 EIGEN_STRONG_INLINE Packet16i pset1<Packet16i>(const int& from) {
206   return _mm512_set1_epi32(from);
207 }
208 
209 template <>
210 EIGEN_STRONG_INLINE Packet16f pset1frombits<Packet16f>(unsigned int from) {
211   return _mm512_castsi512_ps(_mm512_set1_epi32(from));
212 }
213 
214 template <>
215 EIGEN_STRONG_INLINE Packet8d pset1frombits<Packet8d>(const numext::uint64_t from) {
216   return _mm512_castsi512_pd(_mm512_set1_epi64(from));
217 }
218 
219 template<> EIGEN_STRONG_INLINE Packet16f pzero(const Packet16f& /*a*/) { return _mm512_setzero_ps(); }
220 template<> EIGEN_STRONG_INLINE Packet8d pzero(const Packet8d& /*a*/) { return _mm512_setzero_pd(); }
221 template<> EIGEN_STRONG_INLINE Packet16i pzero(const Packet16i& /*a*/) { return _mm512_setzero_si512(); }
222 
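// peven_mask returns a bit-mask packet whose even-indexed elements are all ones and
// whose odd-indexed elements are all zeros. For Packet8d every 64-bit element spans
// two 32-bit words, hence the pairs of -1/0 passed to _mm512_set_epi32.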
223 template<> EIGEN_STRONG_INLINE Packet16f peven_mask(const Packet16f& /*a*/) {
224   return _mm512_castsi512_ps(_mm512_set_epi32(0, -1, 0, -1, 0, -1, 0, -1,
225                                               0, -1, 0, -1, 0, -1, 0, -1));
226 }
227 template<> EIGEN_STRONG_INLINE Packet16i peven_mask(const Packet16i& /*a*/) {
228   return _mm512_set_epi32(0, -1, 0, -1, 0, -1, 0, -1,
229                           0, -1, 0, -1, 0, -1, 0, -1);
230 }
231 template<> EIGEN_STRONG_INLINE Packet8d peven_mask(const Packet8d& /*a*/) {
232   return _mm512_castsi512_pd(_mm512_set_epi32(0, 0, -1, -1, 0, 0, -1, -1,
233                                               0, 0, -1, -1, 0, 0, -1, -1));
234 }
235 
236 template <>
237 EIGEN_STRONG_INLINE Packet16f pload1<Packet16f>(const float* from) {
238   return _mm512_broadcastss_ps(_mm_load_ps1(from));
239 }
240 template <>
241 EIGEN_STRONG_INLINE Packet8d pload1<Packet8d>(const double* from) {
242   return _mm512_set1_pd(*from);
243 }
244 
245 template <>
246 EIGEN_STRONG_INLINE Packet16f plset<Packet16f>(const float& a) {
247   return _mm512_add_ps(
248       _mm512_set1_ps(a),
249       _mm512_set_ps(15.0f, 14.0f, 13.0f, 12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f,
250                     4.0f, 3.0f, 2.0f, 1.0f, 0.0f));
251 }
252 template <>
253 EIGEN_STRONG_INLINE Packet8d plset<Packet8d>(const double& a) {
254   return _mm512_add_pd(_mm512_set1_pd(a),
255                        _mm512_set_pd(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0));
256 }
257 
258 template <>
259 EIGEN_STRONG_INLINE Packet16f padd<Packet16f>(const Packet16f& a,
260                                               const Packet16f& b) {
261   return _mm512_add_ps(a, b);
262 }
263 template <>
264 EIGEN_STRONG_INLINE Packet8d padd<Packet8d>(const Packet8d& a,
265                                             const Packet8d& b) {
266   return _mm512_add_pd(a, b);
267 }
268 template <>
269 EIGEN_STRONG_INLINE Packet16i padd<Packet16i>(const Packet16i& a,
270                                               const Packet16i& b) {
271   return _mm512_add_epi32(a, b);
272 }
273 
274 template <>
275 EIGEN_STRONG_INLINE Packet16f psub<Packet16f>(const Packet16f& a,
276                                               const Packet16f& b) {
277   return _mm512_sub_ps(a, b);
278 }
279 template <>
280 EIGEN_STRONG_INLINE Packet8d psub<Packet8d>(const Packet8d& a,
281                                             const Packet8d& b) {
282   return _mm512_sub_pd(a, b);
283 }
284 template <>
285 EIGEN_STRONG_INLINE Packet16i psub<Packet16i>(const Packet16i& a,
286                                               const Packet16i& b) {
287   return _mm512_sub_epi32(a, b);
288 }
289 
290 template <>
291 EIGEN_STRONG_INLINE Packet16f pnegate(const Packet16f& a) {
292   return _mm512_sub_ps(_mm512_set1_ps(0.0), a);
293 }
294 template <>
295 EIGEN_STRONG_INLINE Packet8d pnegate(const Packet8d& a) {
296   return _mm512_sub_pd(_mm512_set1_pd(0.0), a);
297 }
298 
299 template <>
300 EIGEN_STRONG_INLINE Packet16f pconj(const Packet16f& a) {
301   return a;
302 }
303 template <>
304 EIGEN_STRONG_INLINE Packet8d pconj(const Packet8d& a) {
305   return a;
306 }
307 template <>
308 EIGEN_STRONG_INLINE Packet16i pconj(const Packet16i& a) {
309   return a;
310 }
311 
312 template <>
313 EIGEN_STRONG_INLINE Packet16f pmul<Packet16f>(const Packet16f& a,
314                                               const Packet16f& b) {
315   return _mm512_mul_ps(a, b);
316 }
317 template <>
318 EIGEN_STRONG_INLINE Packet8d pmul<Packet8d>(const Packet8d& a,
319                                             const Packet8d& b) {
320   return _mm512_mul_pd(a, b);
321 }
322 template <>
323 EIGEN_STRONG_INLINE Packet16i pmul<Packet16i>(const Packet16i& a,
324                                               const Packet16i& b) {
325   return _mm512_mullo_epi32(a, b);
326 }
327 
328 template <>
329 EIGEN_STRONG_INLINE Packet16f pdiv<Packet16f>(const Packet16f& a,
330                                               const Packet16f& b) {
331   return _mm512_div_ps(a, b);
332 }
333 template <>
334 EIGEN_STRONG_INLINE Packet8d pdiv<Packet8d>(const Packet8d& a,
335                                             const Packet8d& b) {
336   return _mm512_div_pd(a, b);
337 }
338 
339 #ifdef EIGEN_VECTORIZE_FMA
340 template <>
341 EIGEN_STRONG_INLINE Packet16f pmadd(const Packet16f& a, const Packet16f& b,
342                                     const Packet16f& c) {
343   return _mm512_fmadd_ps(a, b, c);
344 }
345 template <>
346 EIGEN_STRONG_INLINE Packet8d pmadd(const Packet8d& a, const Packet8d& b,
347                                    const Packet8d& c) {
348   return _mm512_fmadd_pd(a, b, c);
349 }
350 #endif
351 
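// pselect(mask, a, b) returns a for lanes whose mask is all ones and b for lanes
// whose mask is all zeros. AVX-512 blends expect a __mmask, so the vector mask is
// first converted into one by comparing it against zero.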
352 template <>
353 EIGEN_DEVICE_FUNC inline Packet16f pselect(const Packet16f& mask,
354                                            const Packet16f& a,
355                                            const Packet16f& b) {
356   __mmask16 mask16 = _mm512_cmp_epi32_mask(
357       _mm512_castps_si512(mask), _mm512_setzero_epi32(), _MM_CMPINT_EQ);
358   return _mm512_mask_blend_ps(mask16, a, b);
359 }
360 
361 template <>
362 EIGEN_DEVICE_FUNC inline Packet8d pselect(const Packet8d& mask,
363                                           const Packet8d& a,
364                                           const Packet8d& b) {
365   __mmask8 mask8 = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask),
366                                          _mm512_setzero_epi32(), _MM_CMPINT_EQ);
367   return _mm512_mask_blend_pd(mask8, a, b);
368 }
369 
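// Note on NaN handling below: the hardware min/max instructions return their second
// operand when either input is NaN, so calling them with the operands swapped makes
// pmin/pmax return 'a' on unordered comparisons, matching std::min/std::max.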
370 template <>
371 EIGEN_STRONG_INLINE Packet16f pmin<Packet16f>(const Packet16f& a,
372                                               const Packet16f& b) {
373   // Arguments are reversed to match NaN propagation behavior of std::min.
374   return _mm512_min_ps(b, a);
375 }
376 template <>
377 EIGEN_STRONG_INLINE Packet8d pmin<Packet8d>(const Packet8d& a,
378                                             const Packet8d& b) {
379   // Arguments are reversed to match NaN propagation behavior of std::min.
380   return _mm512_min_pd(b, a);
381 }
382 
383 template <>
384 EIGEN_STRONG_INLINE Packet16f pmax<Packet16f>(const Packet16f& a,
385                                               const Packet16f& b) {
386   // Arguments are reversed to match NaN propagation behavior of std::max.
387   return _mm512_max_ps(b, a);
388 }
389 template <>
390 EIGEN_STRONG_INLINE Packet8d pmax<Packet8d>(const Packet8d& a,
391                                             const Packet8d& b) {
392   // Arguments are reversed to match NaN propagation behavior of std::max.
393   return _mm512_max_pd(b, a);
394 }
395 
// Add specializations for min/max with prescribed NaN propagation.
397 template<>
398 EIGEN_STRONG_INLINE Packet16f pmin<PropagateNumbers, Packet16f>(const Packet16f& a, const Packet16f& b) {
399   return pminmax_propagate_numbers(a, b, pmin<Packet16f>);
400 }
401 template<>
402 EIGEN_STRONG_INLINE Packet8d pmin<PropagateNumbers, Packet8d>(const Packet8d& a, const Packet8d& b) {
403   return pminmax_propagate_numbers(a, b, pmin<Packet8d>);
404 }
405 template<>
406 EIGEN_STRONG_INLINE Packet16f pmax<PropagateNumbers, Packet16f>(const Packet16f& a, const Packet16f& b) {
407   return pminmax_propagate_numbers(a, b, pmax<Packet16f>);
408 }
409 template<>
410 EIGEN_STRONG_INLINE Packet8d pmax<PropagateNumbers, Packet8d>(const Packet8d& a, const Packet8d& b) {
411   return pminmax_propagate_numbers(a, b, pmax<Packet8d>);
412 }
413 template<>
414 EIGEN_STRONG_INLINE Packet16f pmin<PropagateNaN, Packet16f>(const Packet16f& a, const Packet16f& b) {
415   return pminmax_propagate_nan(a, b, pmin<Packet16f>);
416 }
417 template<>
418 EIGEN_STRONG_INLINE Packet8d pmin<PropagateNaN, Packet8d>(const Packet8d& a, const Packet8d& b) {
419   return pminmax_propagate_nan(a, b, pmin<Packet8d>);
420 }
421 template<>
422 EIGEN_STRONG_INLINE Packet16f pmax<PropagateNaN, Packet16f>(const Packet16f& a, const Packet16f& b) {
423   return pminmax_propagate_nan(a, b, pmax<Packet16f>);
424 }
425 template<>
426 EIGEN_STRONG_INLINE Packet8d pmax<PropagateNaN, Packet8d>(const Packet8d& a, const Packet8d& b) {
427   return pminmax_propagate_nan(a, b, pmax<Packet8d>);
428 }
429 
430 
431 #ifdef EIGEN_VECTORIZE_AVX512DQ
432 template<int I_> EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x) { return _mm512_extractf32x8_ps(x,I_); }
433 template<int I_> EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x) { return _mm512_extractf64x2_pd(x,I_); }
434 EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b) { return _mm512_insertf32x8(_mm512_castps256_ps512(a),b,1); }
435 #else
// AVX512F does not define _mm512_extractf32x8_ps to extract a __m256 from a __m512
437 template<int I_> EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x) {
438   return  _mm256_castsi256_ps(_mm512_extracti64x4_epi64( _mm512_castps_si512(x),I_));
439 }
440 
// AVX512F does not define _mm512_extractf64x2_pd to extract a __m128d from a __m512d
442 template<int I_> EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x) {
443   return _mm_castsi128_pd(_mm512_extracti32x4_epi32( _mm512_castpd_si512(x),I_));
444 }
445 
446 EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b) {
447   return _mm512_castsi512_ps(_mm512_inserti64x4(_mm512_castsi256_si512(_mm256_castps_si256(a)),
448                                                 _mm256_castps_si256(b),1));
449 }
450 #endif
451 
// Helper function for the bit-packing step of low-precision comparisons.
// It packs sixteen 32-bit comparison flags into sixteen 16-bit flags.
EIGEN_STRONG_INLINE __m256i Pack32To16(Packet16f rf) {
  // Split the data into smaller pieces and handle them with AVX instructions
  // so that the element order of the vector is preserved.
457   // Operation:
458   //   dst[15:0]    := Saturate16(rf[31:0])
459   //   dst[31:16]   := Saturate16(rf[63:32])
460   //   ...
461   //   dst[255:240] := Saturate16(rf[255:224])
462   __m256i lo = _mm256_castps_si256(extract256<0>(rf));
463   __m256i hi = _mm256_castps_si256(extract256<1>(rf));
464   __m128i result_lo = _mm_packs_epi32(_mm256_extractf128_si256(lo, 0),
465                                       _mm256_extractf128_si256(lo, 1));
466   __m128i result_hi = _mm_packs_epi32(_mm256_extractf128_si256(hi, 0),
467                                       _mm256_extractf128_si256(hi, 1));
468   return _mm256_insertf128_si256(_mm256_castsi128_si256(result_lo), result_hi, 1);
469 }
470 
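// AVX-512 comparisons yield a __mmask instead of a full-width vector, so each result
// below is expanded back into a packet of all-ones / all-zeros lanes (the mask form
// the rest of Eigen expects) via _mm512_mask_set1_epi32 / _mm512_mask_set1_epi64.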
471 template <>
472 EIGEN_STRONG_INLINE Packet16f pcmp_eq(const Packet16f& a, const Packet16f& b) {
473   __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_EQ_OQ);
474   return _mm512_castsi512_ps(
475       _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
476 }
477 template<> EIGEN_STRONG_INLINE Packet16f pcmp_le(const Packet16f& a, const Packet16f& b) {
478   __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_LE_OQ);
479   return _mm512_castsi512_ps(
480       _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
481 }
482 
483 template<> EIGEN_STRONG_INLINE Packet16f pcmp_lt(const Packet16f& a, const Packet16f& b) {
484   __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_LT_OQ);
485   return _mm512_castsi512_ps(
486       _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
487 }
488 
489 template<> EIGEN_STRONG_INLINE Packet16f pcmp_lt_or_nan(const Packet16f& a, const Packet16f& b) {
490   __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_NGE_UQ);
491   return _mm512_castsi512_ps(
492       _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
493 }
494 
template<> EIGEN_STRONG_INLINE Packet16i pcmp_eq(const Packet16i& a, const Packet16i& b) {
  // Integer comparisons take an _MM_CMPINT_* predicate rather than a _CMP_* one.
  __mmask16 mask = _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_EQ);
  return _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu);
}
499 
500 
501 template <>
502 EIGEN_STRONG_INLINE Packet8d pcmp_eq(const Packet8d& a, const Packet8d& b) {
503   __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_EQ_OQ);
504   return _mm512_castsi512_pd(
505       _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
506 }
507 template <>
508 EIGEN_STRONG_INLINE Packet8d pcmp_le(const Packet8d& a, const Packet8d& b) {
509   __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_LE_OQ);
510   return _mm512_castsi512_pd(
511       _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
512 }
513 template <>
514 EIGEN_STRONG_INLINE Packet8d pcmp_lt(const Packet8d& a, const Packet8d& b) {
515   __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_LT_OQ);
516   return _mm512_castsi512_pd(
517       _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
518 }
519 template <>
520 EIGEN_STRONG_INLINE Packet8d pcmp_lt_or_nan(const Packet8d& a, const Packet8d& b) {
521   __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_NGE_UQ);
522   return _mm512_castsi512_pd(
523       _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
524 }
525 
526 template<> EIGEN_STRONG_INLINE Packet16f print<Packet16f>(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_CUR_DIRECTION); }
527 template<> EIGEN_STRONG_INLINE Packet8d print<Packet8d>(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_CUR_DIRECTION); }
528 
529 template<> EIGEN_STRONG_INLINE Packet16f pceil<Packet16f>(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_TO_POS_INF); }
530 template<> EIGEN_STRONG_INLINE Packet8d pceil<Packet8d>(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_TO_POS_INF); }
531 
532 template<> EIGEN_STRONG_INLINE Packet16f pfloor<Packet16f>(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_TO_NEG_INF); }
533 template<> EIGEN_STRONG_INLINE Packet8d pfloor<Packet8d>(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_TO_NEG_INF); }
534 
535 template <>
536 EIGEN_STRONG_INLINE Packet16i ptrue<Packet16i>(const Packet16i& /*a*/) {
537   return _mm512_set1_epi32(0xffffffffu);
538 }
539 
540 template <>
541 EIGEN_STRONG_INLINE Packet16f ptrue<Packet16f>(const Packet16f& a) {
542   return _mm512_castsi512_ps(ptrue<Packet16i>(_mm512_castps_si512(a)));
543 }
544 
545 template <>
546 EIGEN_STRONG_INLINE Packet8d ptrue<Packet8d>(const Packet8d& a) {
547   return _mm512_castsi512_pd(ptrue<Packet16i>(_mm512_castpd_si512(a)));
548 }
549 
550 template <>
551 EIGEN_STRONG_INLINE Packet16i pand<Packet16i>(const Packet16i& a,
552                                               const Packet16i& b) {
553   return _mm512_and_si512(a,b);
554 }
555 
556 template <>
557 EIGEN_STRONG_INLINE Packet16f pand<Packet16f>(const Packet16f& a,
558                                               const Packet16f& b) {
559 #ifdef EIGEN_VECTORIZE_AVX512DQ
560   return _mm512_and_ps(a, b);
561 #else
562   return _mm512_castsi512_ps(pand(_mm512_castps_si512(a),_mm512_castps_si512(b)));
563 #endif
564 }
565 template <>
566 EIGEN_STRONG_INLINE Packet8d pand<Packet8d>(const Packet8d& a,
567                                             const Packet8d& b) {
568 #ifdef EIGEN_VECTORIZE_AVX512DQ
569   return _mm512_and_pd(a, b);
570 #else
571   Packet8d res = _mm512_undefined_pd();
572   Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
573   Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
574   res = _mm512_insertf64x4(res, _mm256_and_pd(lane0_a, lane0_b), 0);
575 
576   Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
577   Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
578   return _mm512_insertf64x4(res, _mm256_and_pd(lane1_a, lane1_b), 1);
579 #endif
580 }
581 
582 template <>
583 EIGEN_STRONG_INLINE Packet16i por<Packet16i>(const Packet16i& a, const Packet16i& b) {
584   return _mm512_or_si512(a, b);
585 }
586 
587 template <>
588 EIGEN_STRONG_INLINE Packet16f por<Packet16f>(const Packet16f& a, const Packet16f& b) {
589 #ifdef EIGEN_VECTORIZE_AVX512DQ
590   return _mm512_or_ps(a, b);
591 #else
592   return _mm512_castsi512_ps(por(_mm512_castps_si512(a),_mm512_castps_si512(b)));
593 #endif
594 }
595 
596 template <>
597 EIGEN_STRONG_INLINE Packet8d por<Packet8d>(const Packet8d& a,
598                                            const Packet8d& b) {
599 #ifdef EIGEN_VECTORIZE_AVX512DQ
600   return _mm512_or_pd(a, b);
601 #else
602   return _mm512_castsi512_pd(por(_mm512_castpd_si512(a),_mm512_castpd_si512(b)));
603 #endif
604 }
605 
606 template <>
607 EIGEN_STRONG_INLINE Packet16i pxor<Packet16i>(const Packet16i& a, const Packet16i& b) {
608   return _mm512_xor_si512(a, b);
609 }
610 
611 template <>
612 EIGEN_STRONG_INLINE Packet16f pxor<Packet16f>(const Packet16f& a, const Packet16f& b) {
613 #ifdef EIGEN_VECTORIZE_AVX512DQ
614   return _mm512_xor_ps(a, b);
615 #else
616   return _mm512_castsi512_ps(pxor(_mm512_castps_si512(a),_mm512_castps_si512(b)));
617 #endif
618 }
619 
620 template <>
621 EIGEN_STRONG_INLINE Packet8d pxor<Packet8d>(const Packet8d& a, const Packet8d& b) {
622 #ifdef EIGEN_VECTORIZE_AVX512DQ
623   return _mm512_xor_pd(a, b);
624 #else
625   return _mm512_castsi512_pd(pxor(_mm512_castpd_si512(a),_mm512_castpd_si512(b)));
626 #endif
627 }
628 
629 template <>
630 EIGEN_STRONG_INLINE Packet16i pandnot<Packet16i>(const Packet16i& a, const Packet16i& b) {
631   return _mm512_andnot_si512(b, a);
632 }
633 
634 template <>
635 EIGEN_STRONG_INLINE Packet16f pandnot<Packet16f>(const Packet16f& a, const Packet16f& b) {
636 #ifdef EIGEN_VECTORIZE_AVX512DQ
637   return _mm512_andnot_ps(b, a);
638 #else
639   return _mm512_castsi512_ps(pandnot(_mm512_castps_si512(a),_mm512_castps_si512(b)));
640 #endif
641 }
642 template <>
643 EIGEN_STRONG_INLINE Packet8d pandnot<Packet8d>(const Packet8d& a,const Packet8d& b) {
644 #ifdef EIGEN_VECTORIZE_AVX512DQ
645   return _mm512_andnot_pd(b, a);
646 #else
647   return _mm512_castsi512_pd(pandnot(_mm512_castpd_si512(a),_mm512_castpd_si512(b)));
648 #endif
649 }
650 
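// pround rounds half away from zero: copy the sign of 'a' onto the largest value
// strictly below 0.5, add it to 'a', then truncate toward zero. Using a value just
// under 0.5 (rather than 0.5 itself) prevents inputs such as 0.49999997f from being
// bumped up to 1 by the intermediate addition.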
651 template<> EIGEN_STRONG_INLINE Packet16f pround<Packet16f>(const Packet16f& a)
652 {
  // Work-around: pround must round half away from zero (like std::round), whereas the hardware rounding modes round ties to even.
654   const Packet16f mask = pset1frombits<Packet16f>(static_cast<numext::uint32_t>(0x80000000u));
655   const Packet16f prev0dot5 = pset1frombits<Packet16f>(static_cast<numext::uint32_t>(0x3EFFFFFFu));
656   return _mm512_roundscale_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
657 }
658 template<> EIGEN_STRONG_INLINE Packet8d pround<Packet8d>(const Packet8d& a)
659 {
  // Work-around: pround must round half away from zero (like std::round), whereas the hardware rounding modes round ties to even.
661   const Packet8d mask = pset1frombits<Packet8d>(static_cast<numext::uint64_t>(0x8000000000000000ull));
662   const Packet8d prev0dot5 = pset1frombits<Packet8d>(static_cast<numext::uint64_t>(0x3FDFFFFFFFFFFFFFull));
663   return _mm512_roundscale_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
664 }
665 
666 template<int N> EIGEN_STRONG_INLINE Packet16i parithmetic_shift_right(Packet16i a) {
667   return _mm512_srai_epi32(a, N);
668 }
669 
670 template<int N> EIGEN_STRONG_INLINE Packet16i plogical_shift_right(Packet16i a) {
671   return _mm512_srli_epi32(a, N);
672 }
673 
674 template<int N> EIGEN_STRONG_INLINE Packet16i plogical_shift_left(Packet16i a) {
675   return _mm512_slli_epi32(a, N);
676 }
677 
678 template <>
679 EIGEN_STRONG_INLINE Packet16f pload<Packet16f>(const float* from) {
680   EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_ps(from);
681 }
682 template <>
683 EIGEN_STRONG_INLINE Packet8d pload<Packet8d>(const double* from) {
684   EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_pd(from);
685 }
686 template <>
687 EIGEN_STRONG_INLINE Packet16i pload<Packet16i>(const int* from) {
688   EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512(
689       reinterpret_cast<const __m512i*>(from));
690 }
691 
692 template <>
693 EIGEN_STRONG_INLINE Packet16f ploadu<Packet16f>(const float* from) {
694   EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_ps(from);
695 }
696 template <>
697 EIGEN_STRONG_INLINE Packet8d ploadu<Packet8d>(const double* from) {
698   EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_pd(from);
699 }
700 template <>
701 EIGEN_STRONG_INLINE Packet16i ploadu<Packet16i>(const int* from) {
702   EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512(
703       reinterpret_cast<const __m512i*>(from));
704 }
705 
706 template <>
707 EIGEN_STRONG_INLINE Packet16f ploadu<Packet16f>(const float* from, uint16_t umask) {
708   __mmask16 mask = static_cast<__mmask16>(umask);
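  // Lanes whose mask bit is clear are zeroed rather than loaded (maskz semantics).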
709   EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_maskz_loadu_ps(mask, from);
710 }
711 
// Loads 8 floats from memory and returns the packet
// {a0, a0, a1, a1, a2, a2, a3, a3, a4, a4, a5, a5, a6, a6, a7, a7}
714 template <>
715 EIGEN_STRONG_INLINE Packet16f ploaddup<Packet16f>(const float* from) {
716   // an unaligned load is required here as there is no requirement
717   // on the alignment of input pointer 'from'
718   __m256i low_half = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
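  // Zero-extending the eight 32-bit lanes to 64 bits places each float in the even
  // slot of a 64-bit pair; the in-lane permute below copies it into the odd slot.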
719   __m512 even_elements = _mm512_castsi512_ps(_mm512_cvtepu32_epi64(low_half));
720   __m512 pairs = _mm512_permute_ps(even_elements, _MM_SHUFFLE(2, 2, 0, 0));
721   return pairs;
722 }
723 
724 #ifdef EIGEN_VECTORIZE_AVX512DQ
725 // FIXME: this does not look optimal, better load a Packet4d and shuffle...
// Loads 4 doubles from memory and returns the packet
// {a0, a0, a1, a1, a2, a2, a3, a3}
728 template <>
729 EIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {
730  __m512d x = _mm512_setzero_pd();
731   x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[0]), 0);
732   x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[1]), 1);
733   x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[2]), 2);
734   x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[3]), 3);
735   return x;
736 }
737 #else
738 template <>
739 EIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {
740   __m512d x = _mm512_setzero_pd();
741   x = _mm512_mask_broadcastsd_pd(x, 0x3<<0, _mm_load_sd(from+0));
742   x = _mm512_mask_broadcastsd_pd(x, 0x3<<2, _mm_load_sd(from+1));
743   x = _mm512_mask_broadcastsd_pd(x, 0x3<<4, _mm_load_sd(from+2));
744   x = _mm512_mask_broadcastsd_pd(x, 0x3<<6, _mm_load_sd(from+3));
745   return x;
746 }
747 #endif
748 
// Loads 4 floats from memory and returns the packet
// {a0, a0, a0, a0, a1, a1, a1, a1, a2, a2, a2, a2, a3, a3, a3, a3}
751 template <>
752 EIGEN_STRONG_INLINE Packet16f ploadquad<Packet16f>(const float* from) {
753   Packet16f tmp = _mm512_castps128_ps512(ploadu<Packet4f>(from));
754   const Packet16i scatter_mask = _mm512_set_epi32(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0);
755   return _mm512_permutexvar_ps(scatter_mask, tmp);
756 }
757 
// Loads 2 doubles from memory and returns the packet
// {a0, a0, a0, a0, a1, a1, a1, a1}
760 template <>
761 EIGEN_STRONG_INLINE Packet8d ploadquad<Packet8d>(const double* from) {
762   __m256d lane0 = _mm256_set1_pd(*from);
763   __m256d lane1 = _mm256_set1_pd(*(from+1));
764   __m512d tmp = _mm512_undefined_pd();
765   tmp = _mm512_insertf64x4(tmp, lane0, 0);
766   return _mm512_insertf64x4(tmp, lane1, 1);
767 }
768 
769 template <>
770 EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet16f& from) {
771   EIGEN_DEBUG_ALIGNED_STORE _mm512_store_ps(to, from);
772 }
773 template <>
774 EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet8d& from) {
775   EIGEN_DEBUG_ALIGNED_STORE _mm512_store_pd(to, from);
776 }
template <>
EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet16i& from) {
  // pstore requires an aligned destination, so use the aligned store intrinsic.
  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_si512(reinterpret_cast<__m512i*>(to),
                                               from);
}
782 
783 template <>
784 EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet16f& from) {
785   EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_ps(to, from);
786 }
787 template <>
788 EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet8d& from) {
789   EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_pd(to, from);
790 }
791 template <>
792 EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet16i& from) {
793   EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512(
794       reinterpret_cast<__m512i*>(to), from);
795 }
796 template <>
797 EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet16f& from, uint16_t umask) {
798   __mmask16 mask = static_cast<__mmask16>(umask);
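  // Only lanes whose mask bit is set are written; the remaining destination elements
  // are left untouched.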
799   EIGEN_DEBUG_UNALIGNED_STORE return _mm512_mask_storeu_ps(to, mask, from);
800 }
801 
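// Gathers and scatters build an index vector stride * {0, 1, ..., N-1} and use a byte
// scale of sizeof(Scalar), so lane i is read from from[i * stride] or written to
// to[i * stride].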
802 template <>
803 EIGEN_DEVICE_FUNC inline Packet16f pgather<float, Packet16f>(const float* from,
804                                                              Index stride) {
805   Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
806   Packet16i stride_multiplier =
807       _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
808   Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
809 
810   return _mm512_i32gather_ps(indices, from, 4);
811 }
812 template <>
813 EIGEN_DEVICE_FUNC inline Packet8d pgather<double, Packet8d>(const double* from,
814                                                             Index stride) {
815   Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
816   Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
817   Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
818 
819   return _mm512_i32gather_pd(indices, from, 8);
820 }
821 
822 template <>
823 EIGEN_DEVICE_FUNC inline void pscatter<float, Packet16f>(float* to,
824                                                          const Packet16f& from,
825                                                          Index stride) {
826   Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
827   Packet16i stride_multiplier =
828       _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
829   Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
830   _mm512_i32scatter_ps(to, indices, from, 4);
831 }
832 template <>
833 EIGEN_DEVICE_FUNC inline void pscatter<double, Packet8d>(double* to,
834                                                          const Packet8d& from,
835                                                          Index stride) {
836   Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
837   Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
838   Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
839   _mm512_i32scatter_pd(to, indices, from, 8);
840 }
841 
842 template <>
843 EIGEN_STRONG_INLINE void pstore1<Packet16f>(float* to, const float& a) {
844   Packet16f pa = pset1<Packet16f>(a);
845   pstore(to, pa);
846 }
847 template <>
848 EIGEN_STRONG_INLINE void pstore1<Packet8d>(double* to, const double& a) {
849   Packet8d pa = pset1<Packet8d>(a);
850   pstore(to, pa);
851 }
852 template <>
853 EIGEN_STRONG_INLINE void pstore1<Packet16i>(int* to, const int& a) {
854   Packet16i pa = pset1<Packet16i>(a);
855   pstore(to, pa);
856 }
857 
858 template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
859 template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
860 template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
861 
862 template <>
863 EIGEN_STRONG_INLINE float pfirst<Packet16f>(const Packet16f& a) {
864   return _mm_cvtss_f32(_mm512_extractf32x4_ps(a, 0));
865 }
866 template <>
867 EIGEN_STRONG_INLINE double pfirst<Packet8d>(const Packet8d& a) {
868   return _mm_cvtsd_f64(_mm256_extractf128_pd(_mm512_extractf64x4_pd(a, 0), 0));
869 }
870 template <>
871 EIGEN_STRONG_INLINE int pfirst<Packet16i>(const Packet16i& a) {
872   return _mm_extract_epi32(_mm512_extracti32x4_epi32(a, 0), 0);
873 }
874 
875 template<> EIGEN_STRONG_INLINE Packet16f preverse(const Packet16f& a)
876 {
877   return _mm512_permutexvar_ps(_mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), a);
878 }
879 
880 template<> EIGEN_STRONG_INLINE Packet8d preverse(const Packet8d& a)
881 {
882   return _mm512_permutexvar_pd(_mm512_set_epi32(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7), a);
883 }
884 
885 template<> EIGEN_STRONG_INLINE Packet16f pabs(const Packet16f& a)
886 {
  // The _mm512_abs_ps intrinsic is not available here, so clear the sign bit manually.
888   return _mm512_castsi512_ps(_mm512_and_si512(_mm512_castps_si512(a), _mm512_set1_epi32(0x7fffffff)));
889 }
890 template <>
891 EIGEN_STRONG_INLINE Packet8d pabs(const Packet8d& a) {
  // The _mm512_abs_pd intrinsic is not available here, so clear the sign bit manually.
893   return _mm512_castsi512_pd(_mm512_and_si512(_mm512_castpd_si512(a),
894                                    _mm512_set1_epi64(0x7fffffffffffffff)));
895 }
896 
897 template<>
898 EIGEN_STRONG_INLINE Packet16f pfrexp<Packet16f>(const Packet16f& a, Packet16f& exponent){
899   return pfrexp_generic(a, exponent);
900 }
901 
// Extract the biased exponent without relying on a 64-bit integer packet (Packet8l).
903 template<>
904 EIGEN_STRONG_INLINE
905 Packet8d pfrexp_generic_get_biased_exponent(const Packet8d& a) {
906   const Packet8d cst_exp_mask  = pset1frombits<Packet8d>(static_cast<uint64_t>(0x7ff0000000000000ull));
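  // Mask out the 11 exponent bits and shift them down to the integer position before
  // converting to double. Without AVX512DQ there is no 64-bit-integer-to-double
  // conversion, so the (small) exponent values are first narrowed to 32 bits.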
907   #ifdef EIGEN_VECTORIZE_AVX512DQ
908   return _mm512_cvtepi64_pd(_mm512_srli_epi64(_mm512_castpd_si512(pand(a, cst_exp_mask)), 52));
909   #else
910   return _mm512_cvtepi32_pd(_mm512_cvtepi64_epi32(_mm512_srli_epi64(_mm512_castpd_si512(pand(a, cst_exp_mask)), 52)));
911   #endif
912 }
913 
914 template<>
915 EIGEN_STRONG_INLINE Packet8d pfrexp<Packet8d>(const Packet8d& a, Packet8d& exponent) {
916   return pfrexp_generic(a, exponent);
917 }
918 
919 template<> EIGEN_STRONG_INLINE Packet16f pldexp<Packet16f>(const Packet16f& a, const Packet16f& exponent) {
920   return pldexp_generic(a, exponent);
921 }
922 
923 template<> EIGEN_STRONG_INLINE Packet8d pldexp<Packet8d>(const Packet8d& a, const Packet8d& exponent) {
924   // Clamp exponent to [-2099, 2099]
925   const Packet8d max_exponent = pset1<Packet8d>(2099.0);
926   const Packet8i e = _mm512_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));
927 
928   // Split 2^e into four factors and multiply.
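  // Each factor 2^b with b = floor(e/4) stays within the exponent range of a double
  // even at the clamp bounds; the permute/shift pairs below place the biased
  // exponents directly into the exponent field of each 64-bit lane to build 2^b.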
929   const Packet8i bias = pset1<Packet8i>(1023);
930   Packet8i b = parithmetic_shift_right<2>(e);  // floor(e/4)
931 
932   // 2^b
933   const Packet8i permute_idx = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7);
934   Packet8i hi = _mm256_permutevar8x32_epi32(padd(b, bias), permute_idx);
935   Packet8i lo = _mm256_slli_epi64(hi, 52);
936   hi = _mm256_slli_epi64(_mm256_srli_epi64(hi, 32), 52);
937   Packet8d c = _mm512_castsi512_pd(_mm512_inserti64x4(_mm512_castsi256_si512(lo), hi, 1));
938   Packet8d out = pmul(pmul(pmul(a, c), c), c);  // a * 2^(3b)
939 
940   // 2^(e - 3b)
941   b = psub(psub(psub(e, b), b), b);  // e - 3b
942   hi = _mm256_permutevar8x32_epi32(padd(b, bias), permute_idx);
943   lo = _mm256_slli_epi64(hi, 52);
944   hi = _mm256_slli_epi64(_mm256_srli_epi64(hi, 32), 52);
945   c = _mm512_castsi512_pd(_mm512_inserti64x4(_mm512_castsi256_si512(lo), hi, 1));
946   out = pmul(out, c);  // a * 2^e
947   return out;
948 }
949 
950 #ifdef EIGEN_VECTORIZE_AVX512DQ
// AVX512F does not define _mm512_extractf32x8_ps to extract a __m256 from a __m512
952 #define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT)                           \
953   __m256 OUTPUT##_0 = _mm512_extractf32x8_ps(INPUT, 0);                    \
954   __m256 OUTPUT##_1 = _mm512_extractf32x8_ps(INPUT, 1)
955 #else
956 #define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT)                \
957   __m256 OUTPUT##_0 = _mm256_insertf128_ps(                     \
958       _mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 0)), \
959       _mm512_extractf32x4_ps(INPUT, 1), 1);                     \
960   __m256 OUTPUT##_1 = _mm256_insertf128_ps(                     \
961       _mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 2)), \
962       _mm512_extractf32x4_ps(INPUT, 3), 1);
963 #endif
964 
965 #ifdef EIGEN_VECTORIZE_AVX512DQ
966 #define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB) \
967   OUTPUT = _mm512_insertf32x8(_mm512_castps256_ps512(INPUTA), INPUTB, 1);
968 #else
969 #define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB)                    \
970   OUTPUT = _mm512_undefined_ps();                                           \
971   OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 0), 0); \
972   OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 1), 1); \
973   OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 0), 2); \
974   OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 1), 3);
975 #endif
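// EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT) defines OUTPUT##_0 and OUTPUT##_1 as the
// low and high __m256 halves of a Packet16f, and EIGEN_INSERT_8f_INTO_16f performs
// the reverse. Without AVX512DQ both are emulated with 128-bit extracts/inserts.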
976 
977 template <>
978 EIGEN_STRONG_INLINE float predux<Packet16f>(const Packet16f& a) {
979 #ifdef EIGEN_VECTORIZE_AVX512DQ
980   __m256 lane0 = _mm512_extractf32x8_ps(a, 0);
981   __m256 lane1 = _mm512_extractf32x8_ps(a, 1);
982   Packet8f x = _mm256_add_ps(lane0, lane1);
983   return predux<Packet8f>(x);
984 #else
985   __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
986   __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
987   __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
988   __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
989   __m128 sum = _mm_add_ps(_mm_add_ps(lane0, lane1), _mm_add_ps(lane2, lane3));
990   sum = _mm_hadd_ps(sum, sum);
991   sum = _mm_hadd_ps(sum, _mm_permute_ps(sum, 1));
992   return _mm_cvtss_f32(sum);
993 #endif
994 }
995 template <>
996 EIGEN_STRONG_INLINE double predux<Packet8d>(const Packet8d& a) {
997   __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
998   __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
999   __m256d sum = _mm256_add_pd(lane0, lane1);
1000   __m256d tmp0 = _mm256_hadd_pd(sum, _mm256_permute2f128_pd(sum, sum, 1));
1001   return _mm_cvtsd_f64(_mm256_castpd256_pd128(_mm256_hadd_pd(tmp0, tmp0)));
1002 }
1003 
1004 template <>
1005 EIGEN_STRONG_INLINE Packet8f predux_half_dowto4<Packet16f>(const Packet16f& a) {
1006 #ifdef EIGEN_VECTORIZE_AVX512DQ
1007   __m256 lane0 = _mm512_extractf32x8_ps(a, 0);
1008   __m256 lane1 = _mm512_extractf32x8_ps(a, 1);
1009   return _mm256_add_ps(lane0, lane1);
1010 #else
1011   __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
1012   __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
1013   __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
1014   __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
1015   __m128 sum0 = _mm_add_ps(lane0, lane2);
1016   __m128 sum1 = _mm_add_ps(lane1, lane3);
1017   return _mm256_insertf128_ps(_mm256_castps128_ps256(sum0), sum1, 1);
1018 #endif
1019 }
1020 template <>
1021 EIGEN_STRONG_INLINE Packet4d predux_half_dowto4<Packet8d>(const Packet8d& a) {
1022   __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
1023   __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
1024   return _mm256_add_pd(lane0, lane1);
1025 }
1026 
1027 template <>
1028 EIGEN_STRONG_INLINE float predux_mul<Packet16f>(const Packet16f& a) {
1029 //#ifdef EIGEN_VECTORIZE_AVX512DQ
1030 #if 0
1031   Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
1032   Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
1033   Packet8f res = pmul(lane0, lane1);
1034   res = pmul(res, _mm256_permute2f128_ps(res, res, 1));
1035   res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
1036   return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
1037 #else
1038   __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
1039   __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
1040   __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
1041   __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
1042   __m128 res = pmul(pmul(lane0, lane1), pmul(lane2, lane3));
1043   res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
1044   return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
1045 #endif
1046 }
1047 template <>
1048 EIGEN_STRONG_INLINE double predux_mul<Packet8d>(const Packet8d& a) {
1049   __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
1050   __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
1051   __m256d res = pmul(lane0, lane1);
1052   res = pmul(res, _mm256_permute2f128_pd(res, res, 1));
1053   return pfirst(pmul(res, _mm256_shuffle_pd(res, res, 1)));
1054 }
1055 
1056 template <>
1057 EIGEN_STRONG_INLINE float predux_min<Packet16f>(const Packet16f& a) {
1058   __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
1059   __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
1060   __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
1061   __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
1062   __m128 res = _mm_min_ps(_mm_min_ps(lane0, lane1), _mm_min_ps(lane2, lane3));
1063   res = _mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
1064   return pfirst(_mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
1065 }
1066 template <>
1067 EIGEN_STRONG_INLINE double predux_min<Packet8d>(const Packet8d& a) {
1068   __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
1069   __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
1070   __m256d res = _mm256_min_pd(lane0, lane1);
1071   res = _mm256_min_pd(res, _mm256_permute2f128_pd(res, res, 1));
1072   return pfirst(_mm256_min_pd(res, _mm256_shuffle_pd(res, res, 1)));
1073 }
1074 
1075 template <>
1076 EIGEN_STRONG_INLINE float predux_max<Packet16f>(const Packet16f& a) {
1077   __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
1078   __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
1079   __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
1080   __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
1081   __m128 res = _mm_max_ps(_mm_max_ps(lane0, lane1), _mm_max_ps(lane2, lane3));
1082   res = _mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
1083   return pfirst(_mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
1084 }
1085 
1086 template <>
1087 EIGEN_STRONG_INLINE double predux_max<Packet8d>(const Packet8d& a) {
1088   __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
1089   __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
1090   __m256d res = _mm256_max_pd(lane0, lane1);
1091   res = _mm256_max_pd(res, _mm256_permute2f128_pd(res, res, 1));
1092   return pfirst(_mm256_max_pd(res, _mm256_shuffle_pd(res, res, 1)));
1093 }
1094 
1095 template<> EIGEN_STRONG_INLINE bool predux_any(const Packet16f& x)
1096 {
1097   Packet16i xi = _mm512_castps_si512(x);
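  // test_epi32_mask sets a mask bit for every non-zero lane; kortestz returns 1 only
  // when the OR of its arguments is zero, so the negation means "some lane is non-zero".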
1098   __mmask16 tmp = _mm512_test_epi32_mask(xi,xi);
1099   return !_mm512_kortestz(tmp,tmp);
1100 }
1101 
1102 
1103 
1104 #define PACK_OUTPUT(OUTPUT, INPUT, INDEX, STRIDE) \
1105   EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[INDEX], INPUT[INDEX + STRIDE]);
1106 
1107 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 16>& kernel) {
1108   __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
1109   __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
1110   __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
1111   __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
1112   __m512 T4 = _mm512_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
1113   __m512 T5 = _mm512_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
1114   __m512 T6 = _mm512_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
1115   __m512 T7 = _mm512_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
1116   __m512 T8 = _mm512_unpacklo_ps(kernel.packet[8], kernel.packet[9]);
1117   __m512 T9 = _mm512_unpackhi_ps(kernel.packet[8], kernel.packet[9]);
1118   __m512 T10 = _mm512_unpacklo_ps(kernel.packet[10], kernel.packet[11]);
1119   __m512 T11 = _mm512_unpackhi_ps(kernel.packet[10], kernel.packet[11]);
1120   __m512 T12 = _mm512_unpacklo_ps(kernel.packet[12], kernel.packet[13]);
1121   __m512 T13 = _mm512_unpackhi_ps(kernel.packet[12], kernel.packet[13]);
1122   __m512 T14 = _mm512_unpacklo_ps(kernel.packet[14], kernel.packet[15]);
1123   __m512 T15 = _mm512_unpackhi_ps(kernel.packet[14], kernel.packet[15]);
1124   __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
1125   __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
1126   __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
1127   __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
1128   __m512 S4 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));
1129   __m512 S5 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));
1130   __m512 S6 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));
1131   __m512 S7 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));
1132   __m512 S8 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(1, 0, 1, 0));
1133   __m512 S9 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(3, 2, 3, 2));
1134   __m512 S10 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(1, 0, 1, 0));
1135   __m512 S11 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(3, 2, 3, 2));
1136   __m512 S12 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(1, 0, 1, 0));
1137   __m512 S13 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(3, 2, 3, 2));
1138   __m512 S14 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(1, 0, 1, 0));
1139   __m512 S15 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(3, 2, 3, 2));
1140 
1141   EIGEN_EXTRACT_8f_FROM_16f(S0, S0);
1142   EIGEN_EXTRACT_8f_FROM_16f(S1, S1);
1143   EIGEN_EXTRACT_8f_FROM_16f(S2, S2);
1144   EIGEN_EXTRACT_8f_FROM_16f(S3, S3);
1145   EIGEN_EXTRACT_8f_FROM_16f(S4, S4);
1146   EIGEN_EXTRACT_8f_FROM_16f(S5, S5);
1147   EIGEN_EXTRACT_8f_FROM_16f(S6, S6);
1148   EIGEN_EXTRACT_8f_FROM_16f(S7, S7);
1149   EIGEN_EXTRACT_8f_FROM_16f(S8, S8);
1150   EIGEN_EXTRACT_8f_FROM_16f(S9, S9);
1151   EIGEN_EXTRACT_8f_FROM_16f(S10, S10);
1152   EIGEN_EXTRACT_8f_FROM_16f(S11, S11);
1153   EIGEN_EXTRACT_8f_FROM_16f(S12, S12);
1154   EIGEN_EXTRACT_8f_FROM_16f(S13, S13);
1155   EIGEN_EXTRACT_8f_FROM_16f(S14, S14);
1156   EIGEN_EXTRACT_8f_FROM_16f(S15, S15);
1157 
1158   PacketBlock<Packet8f, 32> tmp;
1159 
1160   tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S4_0, 0x20);
1161   tmp.packet[1] = _mm256_permute2f128_ps(S1_0, S5_0, 0x20);
1162   tmp.packet[2] = _mm256_permute2f128_ps(S2_0, S6_0, 0x20);
1163   tmp.packet[3] = _mm256_permute2f128_ps(S3_0, S7_0, 0x20);
1164   tmp.packet[4] = _mm256_permute2f128_ps(S0_0, S4_0, 0x31);
1165   tmp.packet[5] = _mm256_permute2f128_ps(S1_0, S5_0, 0x31);
1166   tmp.packet[6] = _mm256_permute2f128_ps(S2_0, S6_0, 0x31);
1167   tmp.packet[7] = _mm256_permute2f128_ps(S3_0, S7_0, 0x31);
1168 
1169   tmp.packet[8] = _mm256_permute2f128_ps(S0_1, S4_1, 0x20);
1170   tmp.packet[9] = _mm256_permute2f128_ps(S1_1, S5_1, 0x20);
1171   tmp.packet[10] = _mm256_permute2f128_ps(S2_1, S6_1, 0x20);
1172   tmp.packet[11] = _mm256_permute2f128_ps(S3_1, S7_1, 0x20);
1173   tmp.packet[12] = _mm256_permute2f128_ps(S0_1, S4_1, 0x31);
1174   tmp.packet[13] = _mm256_permute2f128_ps(S1_1, S5_1, 0x31);
1175   tmp.packet[14] = _mm256_permute2f128_ps(S2_1, S6_1, 0x31);
1176   tmp.packet[15] = _mm256_permute2f128_ps(S3_1, S7_1, 0x31);
1177 
  // Second set of __m256 outputs
1179   tmp.packet[16] = _mm256_permute2f128_ps(S8_0, S12_0, 0x20);
1180   tmp.packet[17] = _mm256_permute2f128_ps(S9_0, S13_0, 0x20);
1181   tmp.packet[18] = _mm256_permute2f128_ps(S10_0, S14_0, 0x20);
1182   tmp.packet[19] = _mm256_permute2f128_ps(S11_0, S15_0, 0x20);
1183   tmp.packet[20] = _mm256_permute2f128_ps(S8_0, S12_0, 0x31);
1184   tmp.packet[21] = _mm256_permute2f128_ps(S9_0, S13_0, 0x31);
1185   tmp.packet[22] = _mm256_permute2f128_ps(S10_0, S14_0, 0x31);
1186   tmp.packet[23] = _mm256_permute2f128_ps(S11_0, S15_0, 0x31);
1187 
1188   tmp.packet[24] = _mm256_permute2f128_ps(S8_1, S12_1, 0x20);
1189   tmp.packet[25] = _mm256_permute2f128_ps(S9_1, S13_1, 0x20);
1190   tmp.packet[26] = _mm256_permute2f128_ps(S10_1, S14_1, 0x20);
1191   tmp.packet[27] = _mm256_permute2f128_ps(S11_1, S15_1, 0x20);
1192   tmp.packet[28] = _mm256_permute2f128_ps(S8_1, S12_1, 0x31);
1193   tmp.packet[29] = _mm256_permute2f128_ps(S9_1, S13_1, 0x31);
1194   tmp.packet[30] = _mm256_permute2f128_ps(S10_1, S14_1, 0x31);
1195   tmp.packet[31] = _mm256_permute2f128_ps(S11_1, S15_1, 0x31);
1196 
1197   // Pack them into the output
1198   PACK_OUTPUT(kernel.packet, tmp.packet, 0, 16);
1199   PACK_OUTPUT(kernel.packet, tmp.packet, 1, 16);
1200   PACK_OUTPUT(kernel.packet, tmp.packet, 2, 16);
1201   PACK_OUTPUT(kernel.packet, tmp.packet, 3, 16);
1202 
1203   PACK_OUTPUT(kernel.packet, tmp.packet, 4, 16);
1204   PACK_OUTPUT(kernel.packet, tmp.packet, 5, 16);
1205   PACK_OUTPUT(kernel.packet, tmp.packet, 6, 16);
1206   PACK_OUTPUT(kernel.packet, tmp.packet, 7, 16);
1207 
1208   PACK_OUTPUT(kernel.packet, tmp.packet, 8, 16);
1209   PACK_OUTPUT(kernel.packet, tmp.packet, 9, 16);
1210   PACK_OUTPUT(kernel.packet, tmp.packet, 10, 16);
1211   PACK_OUTPUT(kernel.packet, tmp.packet, 11, 16);
1212 
1213   PACK_OUTPUT(kernel.packet, tmp.packet, 12, 16);
1214   PACK_OUTPUT(kernel.packet, tmp.packet, 13, 16);
1215   PACK_OUTPUT(kernel.packet, tmp.packet, 14, 16);
1216   PACK_OUTPUT(kernel.packet, tmp.packet, 15, 16);
1217 }
1218 #define PACK_OUTPUT_2(OUTPUT, INPUT, INDEX, STRIDE)         \
1219   EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[2 * INDEX], \
1220                            INPUT[2 * INDEX + STRIDE]);
1221 
1222 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 4>& kernel) {
1223   __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
1224   __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
1225   __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
1226   __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
1227 
1228   __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
1229   __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
1230   __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
1231   __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
1232 
1233   EIGEN_EXTRACT_8f_FROM_16f(S0, S0);
1234   EIGEN_EXTRACT_8f_FROM_16f(S1, S1);
1235   EIGEN_EXTRACT_8f_FROM_16f(S2, S2);
1236   EIGEN_EXTRACT_8f_FROM_16f(S3, S3);
1237 
1238   PacketBlock<Packet8f, 8> tmp;
1239 
1240   tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S1_0, 0x20);
1241   tmp.packet[1] = _mm256_permute2f128_ps(S2_0, S3_0, 0x20);
1242   tmp.packet[2] = _mm256_permute2f128_ps(S0_0, S1_0, 0x31);
1243   tmp.packet[3] = _mm256_permute2f128_ps(S2_0, S3_0, 0x31);
1244 
1245   tmp.packet[4] = _mm256_permute2f128_ps(S0_1, S1_1, 0x20);
1246   tmp.packet[5] = _mm256_permute2f128_ps(S2_1, S3_1, 0x20);
1247   tmp.packet[6] = _mm256_permute2f128_ps(S0_1, S1_1, 0x31);
1248   tmp.packet[7] = _mm256_permute2f128_ps(S2_1, S3_1, 0x31);
1249 
1250   PACK_OUTPUT_2(kernel.packet, tmp.packet, 0, 1);
1251   PACK_OUTPUT_2(kernel.packet, tmp.packet, 1, 1);
1252   PACK_OUTPUT_2(kernel.packet, tmp.packet, 2, 1);
1253   PACK_OUTPUT_2(kernel.packet, tmp.packet, 3, 1);
1254 }
1255 
1256 #define PACK_OUTPUT_SQ_D(OUTPUT, INPUT, INDEX, STRIDE)                \
1257   OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX], 0); \
1258   OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX + STRIDE], 1);
1259 
1260 #define PACK_OUTPUT_D(OUTPUT, INPUT, INDEX, STRIDE)                         \
1261   OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX)], 0); \
1262   OUTPUT[INDEX] =                                                           \
1263       _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX) + STRIDE], 1);
1264 
1265 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8d, 4>& kernel) {
1266   __m512d T0 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
1267   __m512d T1 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0xff);
1268   __m512d T2 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
1269   __m512d T3 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0xff);
1270 
1271   PacketBlock<Packet4d, 8> tmp;
1272 
1273   tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
1274                                          _mm512_extractf64x4_pd(T2, 0), 0x20);
1275   tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
1276                                          _mm512_extractf64x4_pd(T3, 0), 0x20);
1277   tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
1278                                          _mm512_extractf64x4_pd(T2, 0), 0x31);
1279   tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
1280                                          _mm512_extractf64x4_pd(T3, 0), 0x31);
1281 
1282   tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
1283                                          _mm512_extractf64x4_pd(T2, 1), 0x20);
1284   tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
1285                                          _mm512_extractf64x4_pd(T3, 1), 0x20);
1286   tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
1287                                          _mm512_extractf64x4_pd(T2, 1), 0x31);
1288   tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
1289                                          _mm512_extractf64x4_pd(T3, 1), 0x31);
1290 
1291   PACK_OUTPUT_D(kernel.packet, tmp.packet, 0, 1);
1292   PACK_OUTPUT_D(kernel.packet, tmp.packet, 1, 1);
1293   PACK_OUTPUT_D(kernel.packet, tmp.packet, 2, 1);
1294   PACK_OUTPUT_D(kernel.packet, tmp.packet, 3, 1);
1295 }
1296 
1297 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8d, 8>& kernel) {
1298   __m512d T0 = _mm512_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
1299   __m512d T1 = _mm512_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
1300   __m512d T2 = _mm512_unpacklo_pd(kernel.packet[2], kernel.packet[3]);
1301   __m512d T3 = _mm512_unpackhi_pd(kernel.packet[2], kernel.packet[3]);
1302   __m512d T4 = _mm512_unpacklo_pd(kernel.packet[4], kernel.packet[5]);
1303   __m512d T5 = _mm512_unpackhi_pd(kernel.packet[4], kernel.packet[5]);
1304   __m512d T6 = _mm512_unpacklo_pd(kernel.packet[6], kernel.packet[7]);
1305   __m512d T7 = _mm512_unpackhi_pd(kernel.packet[6], kernel.packet[7]);
1306 
1307   PacketBlock<Packet4d, 16> tmp;
1308 
1309   tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
1310                                          _mm512_extractf64x4_pd(T2, 0), 0x20);
1311   tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
1312                                          _mm512_extractf64x4_pd(T3, 0), 0x20);
1313   tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
1314                                          _mm512_extractf64x4_pd(T2, 0), 0x31);
1315   tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
1316                                          _mm512_extractf64x4_pd(T3, 0), 0x31);
1317 
1318   tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
1319                                          _mm512_extractf64x4_pd(T2, 1), 0x20);
1320   tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
1321                                          _mm512_extractf64x4_pd(T3, 1), 0x20);
1322   tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
1323                                          _mm512_extractf64x4_pd(T2, 1), 0x31);
1324   tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
1325                                          _mm512_extractf64x4_pd(T3, 1), 0x31);
1326 
1327   tmp.packet[8] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0),
1328                                          _mm512_extractf64x4_pd(T6, 0), 0x20);
1329   tmp.packet[9] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0),
1330                                          _mm512_extractf64x4_pd(T7, 0), 0x20);
1331   tmp.packet[10] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0),
1332                                           _mm512_extractf64x4_pd(T6, 0), 0x31);
1333   tmp.packet[11] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0),
1334                                           _mm512_extractf64x4_pd(T7, 0), 0x31);
1335 
1336   tmp.packet[12] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1),
1337                                           _mm512_extractf64x4_pd(T6, 1), 0x20);
1338   tmp.packet[13] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1),
1339                                           _mm512_extractf64x4_pd(T7, 1), 0x20);
1340   tmp.packet[14] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1),
1341                                           _mm512_extractf64x4_pd(T6, 1), 0x31);
1342   tmp.packet[15] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1),
1343                                           _mm512_extractf64x4_pd(T7, 1), 0x31);
1344 
1345   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 0, 8);
1346   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 1, 8);
1347   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 2, 8);
1348   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 3, 8);
1349 
1350   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 4, 8);
1351   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 5, 8);
1352   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 6, 8);
1353   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 7, 8);
1354 }
1355 template <>
1356 EIGEN_STRONG_INLINE Packet16f pblend(const Selector<16>& /*ifPacket*/,
1357                                      const Packet16f& /*thenPacket*/,
1358                                      const Packet16f& /*elsePacket*/) {
1359   assert(false && "To be implemented");
1360   return Packet16f();
1361 }
1362 template <>
1363 EIGEN_STRONG_INLINE Packet8d pblend(const Selector<8>& ifPacket,
1364                                     const Packet8d& thenPacket,
1365                                     const Packet8d& elsePacket) {
1366   __mmask8 m = (ifPacket.select[0]   )
1367              | (ifPacket.select[1]<<1)
1368              | (ifPacket.select[2]<<2)
1369              | (ifPacket.select[3]<<3)
1370              | (ifPacket.select[4]<<4)
1371              | (ifPacket.select[5]<<5)
1372              | (ifPacket.select[6]<<6)
1373              | (ifPacket.select[7]<<7);
1374   return _mm512_mask_blend_pd(m, elsePacket, thenPacket);
1375 }
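// For reference, the masked blend above is equivalent to the following scalar
// selection (an illustrative sketch only, not part of the vectorized path):
//
//   for (int i = 0; i < 8; ++i)
//     result[i] = ifPacket.select[i] ? thenPacket[i] : elsePacket[i];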
1376 
1377 // Packet math for Eigen::half
1378 template<> EIGEN_STRONG_INLINE Packet16h pset1<Packet16h>(const Eigen::half& from) {
1379   return _mm256_set1_epi16(from.x);
1380 }
1381 
1382 template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet16h>(const Packet16h& from) {
1383   return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm256_extract_epi16(from, 0)));
1384 }
1385 
1386 template<> EIGEN_STRONG_INLINE Packet16h pload<Packet16h>(const Eigen::half* from) {
1387   return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
1388 }
1389 
1390 template<> EIGEN_STRONG_INLINE Packet16h ploadu<Packet16h>(const Eigen::half* from) {
1391   return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
1392 }
1393 
1394 template<> EIGEN_STRONG_INLINE void pstore<half>(Eigen::half* to, const Packet16h& from) {
1395   // (void*) -> workaround clang warning:
1396   // cast from 'Eigen::half *' to '__m256i *' increases required alignment from 2 to 32
1397   _mm256_store_si256((__m256i*)(void*)to, from);
1398 }
1399 
1400 template<> EIGEN_STRONG_INLINE void pstoreu<half>(Eigen::half* to, const Packet16h& from) {
1401   // (void*) -> workaround clang warning:
1402   // cast from 'Eigen::half *' to '__m256i *' increases required alignment from 2 to 32
1403   _mm256_storeu_si256((__m256i*)(void*)to, from);
1404 }
1405 
1406 template<> EIGEN_STRONG_INLINE Packet16h
1407 ploaddup<Packet16h>(const Eigen::half*  from) {
1408   unsigned short a = from[0].x;
1409   unsigned short b = from[1].x;
1410   unsigned short c = from[2].x;
1411   unsigned short d = from[3].x;
1412   unsigned short e = from[4].x;
1413   unsigned short f = from[5].x;
1414   unsigned short g = from[6].x;
1415   unsigned short h = from[7].x;
1416   return _mm256_set_epi16(h, h, g, g, f, f, e, e, d, d, c, c, b, b, a, a);
1417 }
1418 
1419 template<> EIGEN_STRONG_INLINE Packet16h
1420 ploadquad(const Eigen::half* from) {
1421   unsigned short a = from[0].x;
1422   unsigned short b = from[1].x;
1423   unsigned short c = from[2].x;
1424   unsigned short d = from[3].x;
1425   return _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a);
1426 }
1427 
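// Widen 16 half-precision values to 16 floats. When EIGEN_HAS_FP16_C is
// defined this maps to a single vcvtph2ps; otherwise the values are converted
// one by one through Eigen::half.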
1428 EIGEN_STRONG_INLINE Packet16f half2float(const Packet16h& a) {
1429 #ifdef EIGEN_HAS_FP16_C
1430   return _mm512_cvtph_ps(a);
1431 #else
1432   EIGEN_ALIGN64 half aux[16];
1433   pstore(aux, a);
1434   float f0(aux[0]);
1435   float f1(aux[1]);
1436   float f2(aux[2]);
1437   float f3(aux[3]);
1438   float f4(aux[4]);
1439   float f5(aux[5]);
1440   float f6(aux[6]);
1441   float f7(aux[7]);
1442   float f8(aux[8]);
1443   float f9(aux[9]);
1444   float fa(aux[10]);
1445   float fb(aux[11]);
1446   float fc(aux[12]);
1447   float fd(aux[13]);
1448   float fe(aux[14]);
1449   float ff(aux[15]);
1450 
1451   return _mm512_set_ps(
1452       ff, fe, fd, fc, fb, fa, f9, f8, f7, f6, f5, f4, f3, f2, f1, f0);
1453 #endif
1454 }
1455 
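// Narrow 16 floats to 16 halfs. When EIGEN_HAS_FP16_C is defined this maps to
// a single vcvtps2ph with round-to-nearest-even; otherwise the values are
// converted one by one through Eigen::half.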
1456 EIGEN_STRONG_INLINE Packet16h float2half(const Packet16f& a) {
1457 #ifdef EIGEN_HAS_FP16_C
1458   return _mm512_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
1459 #else
1460   EIGEN_ALIGN64 float aux[16];
1461   pstore(aux, a);
1462   half h0(aux[0]);
1463   half h1(aux[1]);
1464   half h2(aux[2]);
1465   half h3(aux[3]);
1466   half h4(aux[4]);
1467   half h5(aux[5]);
1468   half h6(aux[6]);
1469   half h7(aux[7]);
1470   half h8(aux[8]);
1471   half h9(aux[9]);
1472   half ha(aux[10]);
1473   half hb(aux[11]);
1474   half hc(aux[12]);
1475   half hd(aux[13]);
1476   half he(aux[14]);
1477   half hf(aux[15]);
1478 
1479   return _mm256_set_epi16(
1480       hf.x, he.x, hd.x, hc.x, hb.x, ha.x, h9.x, h8.x,
1481       h7.x, h6.x, h5.x, h4.x, h3.x, h2.x, h1.x, h0.x);
1482 #endif
1483 }
1484 
1485 template<> EIGEN_STRONG_INLINE Packet16h ptrue(const Packet16h& a) {
1486   return ptrue(Packet8i(a));
1487 }
1488 
1489 template <>
1490 EIGEN_STRONG_INLINE Packet16h pabs(const Packet16h& a) {
1491   const __m256i sign_mask = _mm256_set1_epi16(static_cast<numext::uint16_t>(0x8000));
1492   return _mm256_andnot_si256(sign_mask, a);
1493 }
1494 
1495 template <>
1496 EIGEN_STRONG_INLINE Packet16h pmin<Packet16h>(const Packet16h& a,
1497                                               const Packet16h& b) {
1498   return float2half(pmin<Packet16f>(half2float(a), half2float(b)));
1499 }
1500 
1501 template <>
1502 EIGEN_STRONG_INLINE Packet16h pmax<Packet16h>(const Packet16h& a,
1503                                               const Packet16h& b) {
1504   return float2half(pmax<Packet16f>(half2float(a), half2float(b)));
1505 }
1506 
1507 template <>
1508 EIGEN_STRONG_INLINE Packet16h plset<Packet16h>(const half& a) {
1509   return float2half(plset<Packet16f>(static_cast<float>(a)));
1510 }
1511 
1512 template<> EIGEN_STRONG_INLINE Packet16h por(const Packet16h& a,const Packet16h& b) {
  // Packet8i may itself be a wrapper around __m256i, so cast explicitly to
  // Packet8i to make sure the correct overload is selected.
1515   return por(Packet8i(a),Packet8i(b));
1516 }
1517 template<> EIGEN_STRONG_INLINE Packet16h pxor(const Packet16h& a,const Packet16h& b) {
1518   return pxor(Packet8i(a),Packet8i(b));
1519 }
1520 template<> EIGEN_STRONG_INLINE Packet16h pand(const Packet16h& a,const Packet16h& b) {
1521   return pand(Packet8i(a),Packet8i(b));
1522 }
1523 template<> EIGEN_STRONG_INLINE Packet16h pandnot(const Packet16h& a,const Packet16h& b) {
1524   return pandnot(Packet8i(a),Packet8i(b));
1525 }
1526 
1527 template<> EIGEN_STRONG_INLINE Packet16h pselect(const Packet16h& mask, const Packet16h& a, const Packet16h& b) {
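  // As with the bfloat16 variant below, each 16-bit lane of the mask is
  // assumed to be either all zeros or all ones, so a byte-wise blend selects
  // the correct element.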
1528   return _mm256_blendv_epi8(b, a, mask);
1529 }
1530 
1531 template<> EIGEN_STRONG_INLINE Packet16h pround<Packet16h>(const Packet16h& a) {
1532   return float2half(pround<Packet16f>(half2float(a)));
1533 }
1534 
1535 template<> EIGEN_STRONG_INLINE Packet16h print<Packet16h>(const Packet16h& a) {
1536   return float2half(print<Packet16f>(half2float(a)));
1537 }
1538 
1539 template<> EIGEN_STRONG_INLINE Packet16h pceil<Packet16h>(const Packet16h& a) {
1540   return float2half(pceil<Packet16f>(half2float(a)));
1541 }
1542 
1543 template<> EIGEN_STRONG_INLINE Packet16h pfloor<Packet16h>(const Packet16h& a) {
1544   return float2half(pfloor<Packet16f>(half2float(a)));
1545 }
1546 
1547 template<> EIGEN_STRONG_INLINE Packet16h pcmp_eq(const Packet16h& a,const Packet16h& b) {
1548   Packet16f af = half2float(a);
1549   Packet16f bf = half2float(b);
1550   return Pack32To16(pcmp_eq(af, bf));
1551 }
1552 
1553 template<> EIGEN_STRONG_INLINE Packet16h pcmp_le(const Packet16h& a,const Packet16h& b) {
1554   return Pack32To16(pcmp_le(half2float(a), half2float(b)));
1555 }
1556 
1557 template<> EIGEN_STRONG_INLINE Packet16h pcmp_lt(const Packet16h& a,const Packet16h& b) {
1558   return Pack32To16(pcmp_lt(half2float(a), half2float(b)));
1559 }
1560 
1561 template<> EIGEN_STRONG_INLINE Packet16h pcmp_lt_or_nan(const Packet16h& a,const Packet16h& b) {
1562   return Pack32To16(pcmp_lt_or_nan(half2float(a), half2float(b)));
1563 }
1564 
1565 template<> EIGEN_STRONG_INLINE Packet16h pconj(const Packet16h& a) { return a; }
1566 
1567 template<> EIGEN_STRONG_INLINE Packet16h pnegate(const Packet16h& a) {
1568   Packet16h sign_mask = _mm256_set1_epi16(static_cast<unsigned short>(0x8000));
1569   return _mm256_xor_si256(a, sign_mask);
1570 }
1571 
1572 template<> EIGEN_STRONG_INLINE Packet16h padd<Packet16h>(const Packet16h& a, const Packet16h& b) {
1573   Packet16f af = half2float(a);
1574   Packet16f bf = half2float(b);
1575   Packet16f rf = padd(af, bf);
1576   return float2half(rf);
1577 }
1578 
1579 template<> EIGEN_STRONG_INLINE Packet16h psub<Packet16h>(const Packet16h& a, const Packet16h& b) {
1580   Packet16f af = half2float(a);
1581   Packet16f bf = half2float(b);
1582   Packet16f rf = psub(af, bf);
1583   return float2half(rf);
1584 }
1585 
1586 template<> EIGEN_STRONG_INLINE Packet16h pmul<Packet16h>(const Packet16h& a, const Packet16h& b) {
1587   Packet16f af = half2float(a);
1588   Packet16f bf = half2float(b);
1589   Packet16f rf = pmul(af, bf);
1590   return float2half(rf);
1591 }
1592 
1593 template<> EIGEN_STRONG_INLINE Packet16h pdiv<Packet16h>(const Packet16h& a, const Packet16h& b) {
1594   Packet16f af = half2float(a);
1595   Packet16f bf = half2float(b);
1596   Packet16f rf = pdiv(af, bf);
1597   return float2half(rf);
1598 }
1599 
1600 template<> EIGEN_STRONG_INLINE half predux<Packet16h>(const Packet16h& from) {
1601   Packet16f from_float = half2float(from);
1602   return half(predux(from_float));
1603 }
1604 
1605 template <>
1606 EIGEN_STRONG_INLINE Packet8h predux_half_dowto4<Packet16h>(const Packet16h& a) {
1607   Packet8h lane0 = _mm256_extractf128_si256(a, 0);
1608   Packet8h lane1 = _mm256_extractf128_si256(a, 1);
1609   return padd<Packet8h>(lane0, lane1);
1610 }
1611 
1612 template<> EIGEN_STRONG_INLINE Eigen::half predux_max<Packet16h>(const Packet16h& a) {
1613   Packet16f af = half2float(a);
1614   float reduced = predux_max<Packet16f>(af);
1615   return Eigen::half(reduced);
1616 }
1617 
1618 template<> EIGEN_STRONG_INLINE Eigen::half predux_min<Packet16h>(const Packet16h& a) {
1619   Packet16f af = half2float(a);
1620   float reduced = predux_min<Packet16f>(af);
1621   return Eigen::half(reduced);
1622 }
1623 
1624 template<> EIGEN_STRONG_INLINE half predux_mul<Packet16h>(const Packet16h& from) {
1625   Packet16f from_float = half2float(from);
1626   return half(predux_mul(from_float));
1627 }
1628 
1629 template<> EIGEN_STRONG_INLINE Packet16h preverse(const Packet16h& a)
1630 {
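  // Reverse the 16 half values: the byte shuffle below reverses the eight
  // 16-bit elements within each 128-bit lane, and the cast/insert pair swaps
  // the two lanes.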
1631   __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
1632   return _mm256_insertf128_si256(
1633                     _mm256_castsi128_si256(_mm_shuffle_epi8(_mm256_extractf128_si256(a,1),m)),
1634                                            _mm_shuffle_epi8(_mm256_extractf128_si256(a,0),m), 1);
1635 }
1636 
1637 template<> EIGEN_STRONG_INLINE Packet16h pgather<Eigen::half, Packet16h>(const Eigen::half* from, Index stride)
1638 {
1639   return _mm256_set_epi16(
1640       from[15*stride].x, from[14*stride].x, from[13*stride].x, from[12*stride].x,
1641       from[11*stride].x, from[10*stride].x, from[9*stride].x, from[8*stride].x,
1642       from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x,
1643       from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
1644 }
1645 
1646 template<> EIGEN_STRONG_INLINE void pscatter<half, Packet16h>(half* to, const Packet16h& from, Index stride)
1647 {
1648   EIGEN_ALIGN64 half aux[16];
1649   pstore(aux, from);
1650   to[stride*0] = aux[0];
1651   to[stride*1] = aux[1];
1652   to[stride*2] = aux[2];
1653   to[stride*3] = aux[3];
1654   to[stride*4] = aux[4];
1655   to[stride*5] = aux[5];
1656   to[stride*6] = aux[6];
1657   to[stride*7] = aux[7];
1658   to[stride*8] = aux[8];
1659   to[stride*9] = aux[9];
1660   to[stride*10] = aux[10];
1661   to[stride*11] = aux[11];
1662   to[stride*12] = aux[12];
1663   to[stride*13] = aux[13];
1664   to[stride*14] = aux[14];
1665   to[stride*15] = aux[15];
1666 }
1667 
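// Full 16x16 transpose of half packets: interleave 16-bit elements, then
// 32-bit pairs, then 64-bit quads within each 256-bit register, and finally
// recombine the 128-bit lanes with permutes.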
1668 EIGEN_STRONG_INLINE void
1669 ptranspose(PacketBlock<Packet16h,16>& kernel) {
1670   __m256i a = kernel.packet[0];
1671   __m256i b = kernel.packet[1];
1672   __m256i c = kernel.packet[2];
1673   __m256i d = kernel.packet[3];
1674   __m256i e = kernel.packet[4];
1675   __m256i f = kernel.packet[5];
1676   __m256i g = kernel.packet[6];
1677   __m256i h = kernel.packet[7];
1678   __m256i i = kernel.packet[8];
1679   __m256i j = kernel.packet[9];
1680   __m256i k = kernel.packet[10];
1681   __m256i l = kernel.packet[11];
1682   __m256i m = kernel.packet[12];
1683   __m256i n = kernel.packet[13];
1684   __m256i o = kernel.packet[14];
1685   __m256i p = kernel.packet[15];
1686 
1687   __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
1688   __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
1689   __m256i ef_07 = _mm256_unpacklo_epi16(e, f);
1690   __m256i gh_07 = _mm256_unpacklo_epi16(g, h);
1691   __m256i ij_07 = _mm256_unpacklo_epi16(i, j);
1692   __m256i kl_07 = _mm256_unpacklo_epi16(k, l);
1693   __m256i mn_07 = _mm256_unpacklo_epi16(m, n);
1694   __m256i op_07 = _mm256_unpacklo_epi16(o, p);
1695 
1696   __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
1697   __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
1698   __m256i ef_8f = _mm256_unpackhi_epi16(e, f);
1699   __m256i gh_8f = _mm256_unpackhi_epi16(g, h);
1700   __m256i ij_8f = _mm256_unpackhi_epi16(i, j);
1701   __m256i kl_8f = _mm256_unpackhi_epi16(k, l);
1702   __m256i mn_8f = _mm256_unpackhi_epi16(m, n);
1703   __m256i op_8f = _mm256_unpackhi_epi16(o, p);
1704 
1705   __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
1706   __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
1707   __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);
1708   __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);
1709   __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);
1710   __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);
1711   __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);
1712   __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);
1713 
1714   __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
1715   __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
1716   __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);
1717   __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);
1718   __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);
1719   __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);
1720   __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);
1721   __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);
1722 
1723   __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);
1724   __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);
1725   __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);
1726   __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);
1727   __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);
1728   __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);
1729   __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);
1730   __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);
1731   __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);
1732   __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);
1733   __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);
1734   __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);
1735   __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);
1736   __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);
1737   __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);
1738   __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);
1739 
  // NOTE: there is no unpacklo/hi at 128-bit granularity, so use a lane
  // permute instead.
1741   __m256i a_p_0 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
1742   __m256i a_p_1 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
1743   __m256i a_p_2 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
1744   __m256i a_p_3 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
1745   __m256i a_p_4 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
1746   __m256i a_p_5 = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
1747   __m256i a_p_6 = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
1748   __m256i a_p_7 = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
1749   __m256i a_p_8 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
1750   __m256i a_p_9 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
1751   __m256i a_p_a = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
1752   __m256i a_p_b = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
1753   __m256i a_p_c = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
1754   __m256i a_p_d = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
1755   __m256i a_p_e = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
1756   __m256i a_p_f = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);
1757 
1758   kernel.packet[0] = a_p_0;
1759   kernel.packet[1] = a_p_1;
1760   kernel.packet[2] = a_p_2;
1761   kernel.packet[3] = a_p_3;
1762   kernel.packet[4] = a_p_4;
1763   kernel.packet[5] = a_p_5;
1764   kernel.packet[6] = a_p_6;
1765   kernel.packet[7] = a_p_7;
1766   kernel.packet[8] = a_p_8;
1767   kernel.packet[9] = a_p_9;
1768   kernel.packet[10] = a_p_a;
1769   kernel.packet[11] = a_p_b;
1770   kernel.packet[12] = a_p_c;
1771   kernel.packet[13] = a_p_d;
1772   kernel.packet[14] = a_p_e;
1773   kernel.packet[15] = a_p_f;
1774 }
1775 
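// The partial-block transposes below take a simpler route than a shuffle
// network: store the packets to aligned scalar buffers, permute element by
// element, and reload the results.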
1776 EIGEN_STRONG_INLINE void
1777 ptranspose(PacketBlock<Packet16h,8>& kernel) {
1778   EIGEN_ALIGN64 half in[8][16];
1779   pstore<half>(in[0], kernel.packet[0]);
1780   pstore<half>(in[1], kernel.packet[1]);
1781   pstore<half>(in[2], kernel.packet[2]);
1782   pstore<half>(in[3], kernel.packet[3]);
1783   pstore<half>(in[4], kernel.packet[4]);
1784   pstore<half>(in[5], kernel.packet[5]);
1785   pstore<half>(in[6], kernel.packet[6]);
1786   pstore<half>(in[7], kernel.packet[7]);
1787 
1788   EIGEN_ALIGN64 half out[8][16];
1789 
1790   for (int i = 0; i < 8; ++i) {
1791     for (int j = 0; j < 8; ++j) {
1792       out[i][j] = in[j][2*i];
1793     }
1794     for (int j = 0; j < 8; ++j) {
1795       out[i][j+8] = in[j][2*i+1];
1796     }
1797   }
1798 
1799   kernel.packet[0] = pload<Packet16h>(out[0]);
1800   kernel.packet[1] = pload<Packet16h>(out[1]);
1801   kernel.packet[2] = pload<Packet16h>(out[2]);
1802   kernel.packet[3] = pload<Packet16h>(out[3]);
1803   kernel.packet[4] = pload<Packet16h>(out[4]);
1804   kernel.packet[5] = pload<Packet16h>(out[5]);
1805   kernel.packet[6] = pload<Packet16h>(out[6]);
1806   kernel.packet[7] = pload<Packet16h>(out[7]);
1807 }
1808 
1809 EIGEN_STRONG_INLINE void
1810 ptranspose(PacketBlock<Packet16h,4>& kernel) {
1811   EIGEN_ALIGN64 half in[4][16];
1812   pstore<half>(in[0], kernel.packet[0]);
1813   pstore<half>(in[1], kernel.packet[1]);
1814   pstore<half>(in[2], kernel.packet[2]);
1815   pstore<half>(in[3], kernel.packet[3]);
1816 
1817   EIGEN_ALIGN64 half out[4][16];
1818 
1819   for (int i = 0; i < 4; ++i) {
1820     for (int j = 0; j < 4; ++j) {
1821       out[i][j] = in[j][4*i];
1822     }
1823     for (int j = 0; j < 4; ++j) {
1824       out[i][j+4] = in[j][4*i+1];
1825     }
1826     for (int j = 0; j < 4; ++j) {
1827       out[i][j+8] = in[j][4*i+2];
1828     }
1829     for (int j = 0; j < 4; ++j) {
1830       out[i][j+12] = in[j][4*i+3];
1831     }
1832   }
1833 
1834   kernel.packet[0] = pload<Packet16h>(out[0]);
1835   kernel.packet[1] = pload<Packet16h>(out[1]);
1836   kernel.packet[2] = pload<Packet16h>(out[2]);
1837   kernel.packet[3] = pload<Packet16h>(out[3]);
1838 }
1839 
1840 template <> struct is_arithmetic<Packet16bf> { enum { value = true }; };
1841 
1842 template <>
1843 struct packet_traits<bfloat16> : default_packet_traits {
1844   typedef Packet16bf type;
1845   typedef Packet8bf half;
1846   enum {
1847     Vectorizable = 1,
1848     AlignedOnScalar = 1,
1849     size = 16,
1850     HasHalfPacket = 1,
1851     HasBlend = 0,
1852     HasInsert = 1,
1853     HasSin = EIGEN_FAST_MATH,
1854     HasCos = EIGEN_FAST_MATH,
1855 #if EIGEN_GNUC_AT_LEAST(5, 3) || (!EIGEN_COMP_GNUC_STRICT)
1856 #ifdef EIGEN_VECTORIZE_AVX512DQ
    HasLog = 1,  // Currently fails the accuracy test.
1858     HasLog1p  = 1,
1859     HasExpm1  = 1,
1860     HasNdtri = 1,
1861     HasBessel  = 1,
1862 #endif
1863     HasExp = 1,
1864     HasSqrt = EIGEN_FAST_MATH,
1865     HasRsqrt = EIGEN_FAST_MATH,
1866     HasTanh = EIGEN_FAST_MATH,
1867     HasErf = EIGEN_FAST_MATH,
1868 #endif
1869     HasCmp  = 1,
1870     HasDiv = 1
1871   };
1872 };
1873 
1874 template <>
1875 struct unpacket_traits<Packet16bf>
1876 {
1877   typedef bfloat16 type;
1878   enum {size=16, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
1879   typedef Packet8bf half;
1880 };
1881 
1882 template <>
1883 EIGEN_STRONG_INLINE Packet16bf pset1<Packet16bf>(const bfloat16& from) {
1884   return _mm256_set1_epi16(from.value);
1885 }
1886 
1887 template <>
1888 EIGEN_STRONG_INLINE bfloat16 pfirst<Packet16bf>(const Packet16bf& from) {
1889   bfloat16 t;
1890   t.value = static_cast<unsigned short>(_mm256_extract_epi16(from, 0));
1891   return t;
1892 }
1893 
1894 template <>
1895 EIGEN_STRONG_INLINE Packet16bf pload<Packet16bf>(const bfloat16* from) {
1896   return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
1897 }
1898 
1899 template <>
1900 EIGEN_STRONG_INLINE Packet16bf ploadu<Packet16bf>(const bfloat16* from) {
1901   return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
1902 }
1903 
1904 template <>
1905 EIGEN_STRONG_INLINE void pstore<bfloat16>(bfloat16* to,
1906                                           const Packet16bf& from) {
1907   _mm256_store_si256(reinterpret_cast<__m256i*>(to), from);
1908 }
1909 
1910 template <>
1911 EIGEN_STRONG_INLINE void pstoreu<bfloat16>(bfloat16* to,
1912                                            const Packet16bf& from) {
1913   _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
1914 }
1915 
1916 template<> EIGEN_STRONG_INLINE Packet16bf
1917 ploaddup<Packet16bf>(const bfloat16* from) {
1919   unsigned short a = from[0].value;
1920   unsigned short b = from[1].value;
1921   unsigned short c = from[2].value;
1922   unsigned short d = from[3].value;
1923   unsigned short e = from[4].value;
1924   unsigned short f = from[5].value;
1925   unsigned short g = from[6].value;
1926   unsigned short h = from[7].value;
1927   return _mm256_set_epi16(h, h, g, g, f, f, e, e, d, d, c, c, b, b, a, a);
1928 }
1929 
1930 template<> EIGEN_STRONG_INLINE Packet16bf
1931 ploadquad(const bfloat16* from) {
1933   unsigned short a = from[0].value;
1934   unsigned short b = from[1].value;
1935   unsigned short c = from[2].value;
1936   unsigned short d = from[3].value;
1937   return _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a);
1938 }
1939 
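// A bfloat16 value is the upper 16 bits of the corresponding binary32, so
// widening to float is a zero-extend to 32 bits followed by a left shift of
// 16.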
1940 EIGEN_STRONG_INLINE Packet16f Bf16ToF32(const Packet16bf& a) {
1941   return _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(a), 16));
1942 }
1943 
// Convert float to bfloat16 using the round-to-nearest-even algorithm
// (denormals are handled by the same path); NaN inputs map to a quiet NaN.
1945 EIGEN_STRONG_INLINE Packet16bf F32ToBf16(const Packet16f& a) {
1946   Packet16bf r;
1947 
1948 #if defined(EIGEN_VECTORIZE_AVX512BF16) && EIGEN_GNUC_AT_LEAST(10, 1)
  // GCC 10.1 and later support avx512bf16 and a C-style explicit cast
  // (C++ static_cast is not supported yet), so do the conversion via the
  // intrinsic and keep the data in registers for performance.
1952   r = (__m256i)(_mm512_cvtneps_pbh(a));
1953 
1954 #else
1955   __m512i t;
1956   __m512i input = _mm512_castps_si512(a);
1957   __m512i nan = _mm512_set1_epi32(0x7fc0);
1958 
1959   // uint32_t lsb = (input >> 16) & 1;
1960   t = _mm512_and_si512(_mm512_srli_epi32(input, 16), _mm512_set1_epi32(1));
1961   // uint32_t rounding_bias = 0x7fff + lsb;
1962   t = _mm512_add_epi32(t, _mm512_set1_epi32(0x7fff));
1963   // input += rounding_bias;
1964   t = _mm512_add_epi32(t, input);
1965   // input = input >> 16;
1966   t = _mm512_srli_epi32(t, 16);
1967 
1968   // Check NaN before converting back to bf16
1969   __mmask16 mask = _mm512_cmp_ps_mask(a, a, _CMP_ORD_Q);
1970 
1971   t = _mm512_mask_blend_epi32(mask, nan, t);
1972   // output.value = static_cast<uint16_t>(input);
1973   r = _mm512_cvtepi32_epi16(t);
1974 #endif // EIGEN_VECTORIZE_AVX512BF16
1975 
1976   return r;
1977 }
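// For reference, a scalar sketch of the rounding performed above (illustrative
// only; the names below are not part of Eigen):
//
//   uint16_t float_to_bf16_rne(float x) {
//     uint32_t input;
//     std::memcpy(&input, &x, sizeof(input));
//     if (x != x) return 0x7fc0;              // NaN -> quiet NaN
//     uint32_t lsb = (input >> 16) & 1;
//     uint32_t rounding_bias = 0x7fff + lsb;  // round to nearest, ties to even
//     input += rounding_bias;
//     return static_cast<uint16_t>(input >> 16);
//   }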
1978 
1979 template <>
1980 EIGEN_STRONG_INLINE Packet16bf ptrue(const Packet16bf& a) {
1981   return ptrue<Packet8i>(a);
1982 }
1983 
1984 template <>
1985 EIGEN_STRONG_INLINE Packet16bf por(const Packet16bf& a, const Packet16bf& b) {
1986   return por<Packet8i>(a, b);
1987 }
1988 
1989 template <>
1990 EIGEN_STRONG_INLINE Packet16bf pxor(const Packet16bf& a, const Packet16bf& b) {
1991   return pxor<Packet8i>(a, b);
1992 }
1993 
1994 template <>
1995 EIGEN_STRONG_INLINE Packet16bf pand(const Packet16bf& a, const Packet16bf& b) {
1996   return pand<Packet8i>(a, b);
1997 }
1998 
1999 template <>
2000 EIGEN_STRONG_INLINE Packet16bf pandnot(const Packet16bf& a,
2001                                        const Packet16bf& b) {
2002   return pandnot<Packet8i>(a, b);
2003 }
2004 
2005 template <>
2006 EIGEN_STRONG_INLINE Packet16bf pselect(const Packet16bf& mask,
2007                                        const Packet16bf& a,
2008                                        const Packet16bf& b) {
  // Each 16-bit lane of the mask is expected to be either all zeros or all
  // ones, so the 8-bit blend intrinsic gives the correct result and is fast.
2011   return _mm256_blendv_epi8(b, a, mask);
2012 }
2013 
2014 template<> EIGEN_STRONG_INLINE Packet16bf pround<Packet16bf>(const Packet16bf& a)
2015 {
2016   return F32ToBf16(pround<Packet16f>(Bf16ToF32(a)));
2017 }
2018 
2019 template<> EIGEN_STRONG_INLINE Packet16bf print<Packet16bf>(const Packet16bf& a) {
2020   return F32ToBf16(print<Packet16f>(Bf16ToF32(a)));
2021 }
2022 
2023 template<> EIGEN_STRONG_INLINE Packet16bf pceil<Packet16bf>(const Packet16bf& a) {
2024   return F32ToBf16(pceil<Packet16f>(Bf16ToF32(a)));
2025 }
2026 
2027 template<> EIGEN_STRONG_INLINE Packet16bf pfloor<Packet16bf>(const Packet16bf& a) {
2028   return F32ToBf16(pfloor<Packet16f>(Bf16ToF32(a)));
2029 }
2030 
2031 template <>
2032 EIGEN_STRONG_INLINE Packet16bf pcmp_eq(const Packet16bf& a,
2033                                        const Packet16bf& b) {
2034   return Pack32To16(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b)));
2035 }
2036 
2037 template <>
2038 EIGEN_STRONG_INLINE Packet16bf pcmp_le(const Packet16bf& a,
2039                                        const Packet16bf& b) {
2040   return Pack32To16(pcmp_le(Bf16ToF32(a), Bf16ToF32(b)));
2041 }
2042 
2043 template <>
2044 EIGEN_STRONG_INLINE Packet16bf pcmp_lt(const Packet16bf& a,
2045                                        const Packet16bf& b) {
2046   return Pack32To16(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b)));
2047 }
2048 
2049 template <>
2050 EIGEN_STRONG_INLINE Packet16bf pcmp_lt_or_nan(const Packet16bf& a,
2051                                               const Packet16bf& b) {
2052   return Pack32To16(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b)));
2053 }
2054 
2055 template <>
2056 EIGEN_STRONG_INLINE Packet16bf pnegate(const Packet16bf& a) {
2057   Packet16bf sign_mask = _mm256_set1_epi16(static_cast<unsigned short>(0x8000));
2058   return _mm256_xor_si256(a, sign_mask);
2059 }
2060 
2061 template <>
2062 EIGEN_STRONG_INLINE Packet16bf pconj(const Packet16bf& a) {
2063   return a;
2064 }
2065 
2066 template <>
2067 EIGEN_STRONG_INLINE Packet16bf pabs(const Packet16bf& a) {
2068   const __m256i sign_mask = _mm256_set1_epi16(static_cast<numext::uint16_t>(0x8000));
2069   return _mm256_andnot_si256(sign_mask, a);
2070 }
2071 
2072 template <>
2073 EIGEN_STRONG_INLINE Packet16bf padd<Packet16bf>(const Packet16bf& a,
2074                                                 const Packet16bf& b) {
2075   return F32ToBf16(padd<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
2076 }
2077 
2078 template <>
2079 EIGEN_STRONG_INLINE Packet16bf psub<Packet16bf>(const Packet16bf& a,
2080                                                 const Packet16bf& b) {
2081   return F32ToBf16(psub<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
2082 }
2083 
2084 template <>
2085 EIGEN_STRONG_INLINE Packet16bf pmul<Packet16bf>(const Packet16bf& a,
2086                                                 const Packet16bf& b) {
2087   return F32ToBf16(pmul<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
2088 }
2089 
2090 template <>
2091 EIGEN_STRONG_INLINE Packet16bf pdiv<Packet16bf>(const Packet16bf& a,
2092                                                 const Packet16bf& b) {
2093   return F32ToBf16(pdiv<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
2094 }
2095 
2096 template <>
2097 EIGEN_STRONG_INLINE Packet16bf pmin<Packet16bf>(const Packet16bf& a,
2098                                                 const Packet16bf& b) {
2099   return F32ToBf16(pmin<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
2100 }
2101 
2102 template <>
2103 EIGEN_STRONG_INLINE Packet16bf pmax<Packet16bf>(const Packet16bf& a,
2104                                                 const Packet16bf& b) {
2105   return F32ToBf16(pmax<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
2106 }
2107 
2108 template <>
2109 EIGEN_STRONG_INLINE Packet16bf plset<Packet16bf>(const bfloat16& a) {
2110   return F32ToBf16(plset<Packet16f>(static_cast<float>(a)));
2111 }
2112 
2113 template <>
2114 EIGEN_STRONG_INLINE Packet8bf predux_half_dowto4<Packet16bf>(const Packet16bf& a) {
2115   Packet8bf lane0 = _mm256_extractf128_si256(a, 0);
2116   Packet8bf lane1 = _mm256_extractf128_si256(a, 1);
2117   return padd<Packet8bf>(lane0, lane1);
2118 }
2119 
2120 template <>
2121 EIGEN_STRONG_INLINE bfloat16 predux<Packet16bf>(const Packet16bf& p) {
2122   return static_cast<bfloat16>(predux<Packet16f>(Bf16ToF32(p)));
2123 }
2124 
2125 template <>
2126 EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet16bf>(const Packet16bf& from) {
2127   return static_cast<bfloat16>(predux_mul<Packet16f>(Bf16ToF32(from)));
2128 }
2129 
2130 template <>
2131 EIGEN_STRONG_INLINE bfloat16 predux_min<Packet16bf>(const Packet16bf& from) {
2132   return static_cast<bfloat16>(predux_min<Packet16f>(Bf16ToF32(from)));
2133 }
2134 
2135 template <>
2136 EIGEN_STRONG_INLINE bfloat16 predux_max<Packet16bf>(const Packet16bf& from) {
2137   return static_cast<bfloat16>(predux_max<Packet16f>(Bf16ToF32(from)));
2138 }
2139 
2140 template <>
2141 EIGEN_STRONG_INLINE Packet16bf preverse(const Packet16bf& a) {
2142   __m256i m = _mm256_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1,
2143                                14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
2144 
2145   Packet16bf res;
  // Swap the high and low 128-bit lanes first, since the byte shuffle below
  // operates only within each 128-bit lane.
  res = _mm256_permute2x128_si256(a, a, 1);
  // Reverse the 16-bit elements inside each 128-bit lane.
  return _mm256_shuffle_epi8(res, m);
2150 }
2151 
2152 template <>
2153 EIGEN_STRONG_INLINE Packet16bf pgather<bfloat16, Packet16bf>(const bfloat16* from,
2154                                                              Index stride) {
2155   return _mm256_set_epi16(
2156       from[15*stride].value, from[14*stride].value, from[13*stride].value, from[12*stride].value,
2157       from[11*stride].value, from[10*stride].value, from[9*stride].value, from[8*stride].value,
2158       from[7*stride].value, from[6*stride].value, from[5*stride].value, from[4*stride].value,
2159       from[3*stride].value, from[2*stride].value, from[1*stride].value, from[0*stride].value);
2160 }
2161 
2162 template <>
2163 EIGEN_STRONG_INLINE void pscatter<bfloat16, Packet16bf>(bfloat16* to,
2164                                                         const Packet16bf& from,
2165                                                         Index stride) {
2166   EIGEN_ALIGN64 bfloat16 aux[16];
2167   pstore(aux, from);
2168   to[stride*0] = aux[0];
2169   to[stride*1] = aux[1];
2170   to[stride*2] = aux[2];
2171   to[stride*3] = aux[3];
2172   to[stride*4] = aux[4];
2173   to[stride*5] = aux[5];
2174   to[stride*6] = aux[6];
2175   to[stride*7] = aux[7];
2176   to[stride*8] = aux[8];
2177   to[stride*9] = aux[9];
2178   to[stride*10] = aux[10];
2179   to[stride*11] = aux[11];
2180   to[stride*12] = aux[12];
2181   to[stride*13] = aux[13];
2182   to[stride*14] = aux[14];
2183   to[stride*15] = aux[15];
2184 }
2185 
2186 EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16bf,16>& kernel) {
2187   __m256i a = kernel.packet[0];
2188   __m256i b = kernel.packet[1];
2189   __m256i c = kernel.packet[2];
2190   __m256i d = kernel.packet[3];
2191   __m256i e = kernel.packet[4];
2192   __m256i f = kernel.packet[5];
2193   __m256i g = kernel.packet[6];
2194   __m256i h = kernel.packet[7];
2195   __m256i i = kernel.packet[8];
2196   __m256i j = kernel.packet[9];
2197   __m256i k = kernel.packet[10];
2198   __m256i l = kernel.packet[11];
2199   __m256i m = kernel.packet[12];
2200   __m256i n = kernel.packet[13];
2201   __m256i o = kernel.packet[14];
2202   __m256i p = kernel.packet[15];
2203 
2204   __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
2205   __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
2206   __m256i ef_07 = _mm256_unpacklo_epi16(e, f);
2207   __m256i gh_07 = _mm256_unpacklo_epi16(g, h);
2208   __m256i ij_07 = _mm256_unpacklo_epi16(i, j);
2209   __m256i kl_07 = _mm256_unpacklo_epi16(k, l);
2210   __m256i mn_07 = _mm256_unpacklo_epi16(m, n);
2211   __m256i op_07 = _mm256_unpacklo_epi16(o, p);
2212 
2213   __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
2214   __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
2215   __m256i ef_8f = _mm256_unpackhi_epi16(e, f);
2216   __m256i gh_8f = _mm256_unpackhi_epi16(g, h);
2217   __m256i ij_8f = _mm256_unpackhi_epi16(i, j);
2218   __m256i kl_8f = _mm256_unpackhi_epi16(k, l);
2219   __m256i mn_8f = _mm256_unpackhi_epi16(m, n);
2220   __m256i op_8f = _mm256_unpackhi_epi16(o, p);
2221 
2222   __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
2223   __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
2224   __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);
2225   __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);
2226   __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);
2227   __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);
2228   __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);
2229   __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);
2230 
2231   __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
2232   __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
2233   __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);
2234   __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);
2235   __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);
2236   __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);
2237   __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);
2238   __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);
2239 
2240   __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);
2241   __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);
2242   __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);
2243   __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);
2244   __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);
2245   __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);
2246   __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);
2247   __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);
2248   __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);
2249   __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);
2250   __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);
2251   __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);
2252   __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);
2253   __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);
2254   __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);
2255   __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);
2256 
  // NOTE: there is no unpacklo/hi at 128-bit granularity, so use a lane
  // permute instead.
2258   kernel.packet[0] = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
2259   kernel.packet[1] = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
2260   kernel.packet[2] = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
2261   kernel.packet[3] = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
2262   kernel.packet[4] = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
2263   kernel.packet[5] = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
2264   kernel.packet[6] = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
2265   kernel.packet[7] = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
2266   kernel.packet[8] = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
2267   kernel.packet[9] = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
2268   kernel.packet[10] = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
2269   kernel.packet[11] = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
2270   kernel.packet[12] = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
2271   kernel.packet[13] = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
2272   kernel.packet[14] = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
2273   kernel.packet[15] = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);
2274 }
2275 
2276 EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16bf,4>& kernel) {
2277   __m256i a = kernel.packet[0];
2278   __m256i b = kernel.packet[1];
2279   __m256i c = kernel.packet[2];
2280   __m256i d = kernel.packet[3];
2281 
2282   __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
2283   __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
2284   __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
2285   __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
2286 
2287   __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
2288   __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
2289   __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
2290   __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
2291 
  // NOTE: there is no unpacklo/hi at 128-bit granularity, so use a lane
  // permute instead.
2293   kernel.packet[0] = _mm256_permute2x128_si256(abcd_03, abcd_47, 0x20);
2294   kernel.packet[1] = _mm256_permute2x128_si256(abcd_8b, abcd_cf, 0x20);
2295   kernel.packet[2] = _mm256_permute2x128_si256(abcd_03, abcd_47, 0x31);
2296   kernel.packet[3] = _mm256_permute2x128_si256(abcd_8b, abcd_cf, 0x31);
2297 }
2298 
2299 } // end namespace internal
2300 
2301 } // end namespace Eigen
2302 
2303 #endif // EIGEN_PACKET_MATH_AVX512_H
2304