1 /* Copyright (C) 2014-2020 Free Software Foundation, Inc.
2 
3    This file is part of GCC.
4 
5    GCC is free software; you can redistribute it and/or modify
6    it under the terms of the GNU General Public License as published by
7    the Free Software Foundation; either version 3, or (at your option)
8    any later version.
9 
10    GCC is distributed in the hope that it will be useful,
11    but WITHOUT ANY WARRANTY; without even the implied warranty of
12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13    GNU General Public License for more details.
14 
15    Under Section 7 of GPL version 3, you are granted additional
16    permissions described in the GCC Runtime Library Exception, version
17    3.1, as published by the Free Software Foundation.
18 
19    You should have received a copy of the GNU General Public License and
20    a copy of the GCC Runtime Library Exception along with this program;
21    see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
22    <http://www.gnu.org/licenses/>.  */
23 
24 #ifndef _IMMINTRIN_H_INCLUDED
25 #error "Never use <avx512dqintrin.h> directly; include <immintrin.h> instead."
26 #endif
27 
28 #ifndef _AVX512DQINTRIN_H_INCLUDED
29 #define _AVX512DQINTRIN_H_INCLUDED
30 
31 #ifndef __AVX512DQ__
32 #pragma GCC push_options
33 #pragma GCC target("avx512dq")
34 #define __DISABLE_AVX512DQ__
35 #endif /* __AVX512DQ__ */
36 
/* KTESTB: test 8-bit masks __A and __B.  Store the carry-flag result
   in *__CF and return the zero-flag result.  */
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_ktest_mask8_u8  (__mmask8 __A,  __mmask8 __B, unsigned char *__CF)
{
  *__CF = (unsigned char) __builtin_ia32_ktestcqi (__A, __B);
  return (unsigned char) __builtin_ia32_ktestzqi (__A, __B);
}

/* Return the zero-flag result of KTESTB on __A and __B.  */
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_ktestz_mask8_u8 (__mmask8 __A, __mmask8 __B)
{
  return (unsigned char) __builtin_ia32_ktestzqi (__A, __B);
}

/* Return the carry-flag result of KTESTB on __A and __B.  */
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_ktestc_mask8_u8 (__mmask8 __A, __mmask8 __B)
{
  return (unsigned char) __builtin_ia32_ktestcqi (__A, __B);
}
58 
/* KTESTW: test 16-bit masks __A and __B.  Store the carry-flag result
   in *__CF and return the zero-flag result.  */
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_ktest_mask16_u8  (__mmask16 __A,  __mmask16 __B, unsigned char *__CF)
{
  *__CF = (unsigned char) __builtin_ia32_ktestchi (__A, __B);
  return (unsigned char) __builtin_ia32_ktestzhi (__A, __B);
}

/* Return the zero-flag result of KTESTW on __A and __B.  */
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_ktestz_mask16_u8 (__mmask16 __A, __mmask16 __B)
{
  return (unsigned char) __builtin_ia32_ktestzhi (__A, __B);
}

/* Return the carry-flag result of KTESTW on __A and __B.  */
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_ktestc_mask16_u8 (__mmask16 __A, __mmask16 __B)
{
  return (unsigned char) __builtin_ia32_ktestchi (__A, __B);
}
80 
/* KORTESTB: OR 8-bit masks __A and __B.  Store the carry-flag result
   (all bits set) in *__CF and return the zero-flag result (all bits
   clear).  */
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_kortest_mask8_u8  (__mmask8 __A,  __mmask8 __B, unsigned char *__CF)
{
  *__CF = (unsigned char) __builtin_ia32_kortestcqi (__A, __B);
  return (unsigned char) __builtin_ia32_kortestzqi (__A, __B);
}

/* Return the zero-flag result of KORTESTB on __A and __B.  */
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_kortestz_mask8_u8 (__mmask8 __A, __mmask8 __B)
{
  return (unsigned char) __builtin_ia32_kortestzqi (__A, __B);
}

/* Return the carry-flag result of KORTESTB on __A and __B.  */
extern __inline unsigned char
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_kortestc_mask8_u8 (__mmask8 __A, __mmask8 __B)
{
  return (unsigned char) __builtin_ia32_kortestcqi (__A, __B);
}
102 
/* KADDB: add 8-bit masks __A and __B.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_kadd_mask8 (__mmask8 __A, __mmask8 __B)
{
  return (__mmask8) __builtin_ia32_kaddqi ((__mmask8) __A, (__mmask8) __B);
}

/* KADDW: add 16-bit masks __A and __B.  */
extern __inline __mmask16
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_kadd_mask16 (__mmask16 __A, __mmask16 __B)
{
  return (__mmask16) __builtin_ia32_kaddhi ((__mmask16) __A, (__mmask16) __B);
}
116 
/* KMOVB: copy the 8-bit mask __A into an unsigned int.  */
extern __inline unsigned int
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_cvtmask8_u32 (__mmask8 __A)
{
  return (unsigned int) __builtin_ia32_kmovb ((__mmask8 ) __A);
}

/* KMOVB: copy the low 8 bits of __A into an 8-bit mask.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_cvtu32_mask8 (unsigned int __A)
{
  return (__mmask8) __builtin_ia32_kmovb ((__mmask8) __A);
}
130 
/* Load an 8-bit mask from memory at *__A via KMOVB.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_load_mask8 (__mmask8 *__A)
{
  return (__mmask8) __builtin_ia32_kmovb (*(__mmask8 *) __A);
}

/* Store the 8-bit mask __B to memory at *__A via KMOVB.  */
extern __inline void
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_store_mask8 (__mmask8 *__A, __mmask8 __B)
{
  *(__mmask8 *) __A = __builtin_ia32_kmovb (__B);
}
144 
/* KNOTB: bitwise NOT of the 8-bit mask __A.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_knot_mask8 (__mmask8 __A)
{
  return (__mmask8) __builtin_ia32_knotqi ((__mmask8) __A);
}

/* KORB: bitwise OR of 8-bit masks __A and __B.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_kor_mask8 (__mmask8 __A, __mmask8 __B)
{
  return (__mmask8) __builtin_ia32_korqi ((__mmask8) __A, (__mmask8) __B);
}

/* KXNORB: bitwise XNOR of 8-bit masks __A and __B.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_kxnor_mask8 (__mmask8 __A, __mmask8 __B)
{
  return (__mmask8) __builtin_ia32_kxnorqi ((__mmask8) __A, (__mmask8) __B);
}

/* KXORB: bitwise XOR of 8-bit masks __A and __B.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_kxor_mask8 (__mmask8 __A, __mmask8 __B)
{
  return (__mmask8) __builtin_ia32_kxorqi ((__mmask8) __A, (__mmask8) __B);
}

/* KANDB: bitwise AND of 8-bit masks __A and __B.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_kand_mask8 (__mmask8 __A, __mmask8 __B)
{
  return (__mmask8) __builtin_ia32_kandqi ((__mmask8) __A, (__mmask8) __B);
}

/* KANDNB: bitwise AND of NOT __A with __B (8-bit masks).  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_kandn_mask8 (__mmask8 __A, __mmask8 __B)
{
  return (__mmask8) __builtin_ia32_kandnqi ((__mmask8) __A, (__mmask8) __B);
}
186 
/* Broadcast the 128-bit pair of doubles in __A to all four 128-bit
   lanes of the result.  The merge source is undefined (mask is all
   ones).  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_broadcast_f64x2 (__m128d __A)
{
  return (__m512d)
	 __builtin_ia32_broadcastf64x2_512_mask ((__v2df) __A,
						 _mm512_undefined_pd (),
						 (__mmask8) -1);
}

/* Masked broadcast: elements where __M is clear are taken from __O.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_broadcast_f64x2 (__m512d __O, __mmask8 __M, __m128d __A)
{
  return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df)
							   __A,
							   (__v8df)
							   __O, __M);
}

/* Zero-masked broadcast: elements where __M is clear are zeroed.
   NOTE: the zero vector comes from _mm512_setzero_ps cast to __v8df;
   the all-zero bit pattern is identical to _mm512_setzero_pd.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A)
{
  return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df)
							   __A,
							   (__v8df)
							   _mm512_setzero_ps (),
							   __M);
}
217 
/* Broadcast the 128-bit pair of 64-bit integers in __A to all four
   128-bit lanes of the result.  The merge source is undefined (mask is
   all ones).  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_broadcast_i64x2 (__m128i __A)
{
  return (__m512i)
	 __builtin_ia32_broadcasti64x2_512_mask ((__v2di) __A,
						 _mm512_undefined_epi32 (),
						 (__mmask8) -1);
}

/* Masked broadcast: elements where __M is clear are taken from __O.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_broadcast_i64x2 (__m512i __O, __mmask8 __M, __m128i __A)
{
  return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di)
							   __A,
							   (__v8di)
							   __O, __M);
}

/* Zero-masked broadcast: elements where __M is clear are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
{
  return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di)
							   __A,
							   (__v8di)
							   _mm512_setzero_si512 (),
							   __M);
}
248 
/* Broadcast the low pair of floats in __A to all eight 64-bit lanes of
   the result.  The merge source is undefined (mask is all ones).  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_broadcast_f32x2 (__m128 __A)
{
  return (__m512)
	 __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A,
						 (__v16sf)_mm512_undefined_ps (),
						 (__mmask16) -1);
}

/* Masked broadcast: elements where __M is clear are taken from __O.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_broadcast_f32x2 (__m512 __O, __mmask16 __M, __m128 __A)
{
  return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A,
							  (__v16sf)
							  __O, __M);
}

/* Zero-masked broadcast: elements where __M is clear are zeroed.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_broadcast_f32x2 (__mmask16 __M, __m128 __A)
{
  return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A,
							  (__v16sf)
							  _mm512_setzero_ps (),
							  __M);
}
277 
/* Broadcast the low pair of 32-bit integers in __A to all eight 64-bit
   lanes of the result.  The merge source is undefined (mask is all
   ones).  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_broadcast_i32x2 (__m128i __A)
{
  return (__m512i)
	 __builtin_ia32_broadcasti32x2_512_mask ((__v4si) __A,
						 (__v16si)
						 _mm512_undefined_epi32 (),
						 (__mmask16) -1);
}

/* Masked broadcast: elements where __M is clear are taken from __O.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_broadcast_i32x2 (__m512i __O, __mmask16 __M, __m128i __A)
{
  return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si)
							   __A,
							   (__v16si)
							   __O, __M);
}

/* Zero-masked broadcast: elements where __M is clear are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_broadcast_i32x2 (__mmask16 __M, __m128i __A)
{
  return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si)
							   __A,
							   (__v16si)
							   _mm512_setzero_si512 (),
							   __M);
}
309 
/* Broadcast the 256-bit group of eight floats in __A to both 256-bit
   halves of the result.  The merge source is undefined (mask is all
   ones).  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_broadcast_f32x8 (__m256 __A)
{
  return (__m512)
	 __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A,
						 _mm512_undefined_ps (),
						 (__mmask16) -1);
}

/* Masked broadcast: elements where __M is clear are taken from __O.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_broadcast_f32x8 (__m512 __O, __mmask16 __M, __m256 __A)
{
  return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A,
							  (__v16sf)__O,
							  __M);
}

/* Zero-masked broadcast: elements where __M is clear are zeroed.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_broadcast_f32x8 (__mmask16 __M, __m256 __A)
{
  return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A,
							  (__v16sf)
							  _mm512_setzero_ps (),
							  __M);
}
338 
/* Broadcast the 256-bit group of eight 32-bit integers in __A to both
   256-bit halves of the result.  The merge source is undefined (mask is
   all ones).  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_broadcast_i32x8 (__m256i __A)
{
  return (__m512i)
	 __builtin_ia32_broadcasti32x8_512_mask ((__v8si) __A,
						 (__v16si)
						 _mm512_undefined_epi32 (),
						 (__mmask16) -1);
}

/* Masked broadcast: elements where __M is clear are taken from __O.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_broadcast_i32x8 (__m512i __O, __mmask16 __M, __m256i __A)
{
  return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si)
							   __A,
							   (__v16si)__O,
							   __M);
}

/* Zero-masked broadcast: elements where __M is clear are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_broadcast_i32x8 (__mmask16 __M, __m256i __A)
{
  return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si)
							   __A,
							   (__v16si)
							   _mm512_setzero_si512 (),
							   __M);
}
370 
/* Multiply packed 64-bit integers in __A and __B, keeping the low 64
   bits of each product.  Done as plain C vector multiplication on
   unsigned elements, so wraparound is well defined.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mullo_epi64 (__m512i __A, __m512i __B)
{
  return (__m512i) ((__v8du) __A * (__v8du) __B);
}

/* Masked low multiply: elements where __U is clear are taken from
   __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_mullo_epi64 (__m512i __W, __mmask8 __U, __m512i __A,
			 __m512i __B)
{
  return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
						  (__v8di) __B,
						  (__v8di) __W,
						  (__mmask8) __U);
}

/* Zero-masked low multiply: elements where __U is clear are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_mullo_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
{
  return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
						  (__v8di) __B,
						  (__v8di)
						  _mm512_setzero_si512 (),
						  (__mmask8) __U);
}
399 
/* Bitwise XOR of packed double-precision vectors __A and __B.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_xor_pd (__m512d __A, __m512d __B)
{
  return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
						 (__v8df) __B,
						 (__v8df)
						 _mm512_setzero_pd (),
						 (__mmask8) -1);
}

/* Masked XOR: elements where __U is clear are taken from __W.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_xor_pd (__m512d __W, __mmask8 __U, __m512d __A,
		    __m512d __B)
{
  return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
						 (__v8df) __B,
						 (__v8df) __W,
						 (__mmask8) __U);
}

/* Zero-masked XOR: elements where __U is clear are zeroed.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_xor_pd (__mmask8 __U, __m512d __A, __m512d __B)
{
  return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
						 (__v8df) __B,
						 (__v8df)
						 _mm512_setzero_pd (),
						 (__mmask8) __U);
}
432 
/* Bitwise XOR of packed single-precision vectors __A and __B.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_xor_ps (__m512 __A, __m512 __B)
{
  return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
						(__v16sf) __B,
						(__v16sf)
						_mm512_setzero_ps (),
						(__mmask16) -1);
}

/* Masked XOR: elements where __U is clear are taken from __W.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
{
  return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
						(__v16sf) __B,
						(__v16sf) __W,
						(__mmask16) __U);
}

/* Zero-masked XOR: elements where __U is clear are zeroed.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B)
{
  return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
						(__v16sf) __B,
						(__v16sf)
						_mm512_setzero_ps (),
						(__mmask16) __U);
}
464 
/* Bitwise OR of packed double-precision vectors __A and __B.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_or_pd (__m512d __A, __m512d __B)
{
  return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
						(__v8df) __B,
						(__v8df)
						_mm512_setzero_pd (),
						(__mmask8) -1);
}

/* Masked OR: elements where __U is clear are taken from __W.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_or_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
{
  return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
						(__v8df) __B,
						(__v8df) __W,
						(__mmask8) __U);
}

/* Zero-masked OR: elements where __U is clear are zeroed.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_or_pd (__mmask8 __U, __m512d __A, __m512d __B)
{
  return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
						(__v8df) __B,
						(__v8df)
						_mm512_setzero_pd (),
						(__mmask8) __U);
}
496 
/* Bitwise OR of packed single-precision vectors __A and __B.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_or_ps (__m512 __A, __m512 __B)
{
  return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
					       (__v16sf) __B,
					       (__v16sf)
					       _mm512_setzero_ps (),
					       (__mmask16) -1);
}

/* Masked OR: elements where __U is clear are taken from __W.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
{
  return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
					       (__v16sf) __B,
					       (__v16sf) __W,
					       (__mmask16) __U);
}

/* Zero-masked OR: elements where __U is clear are zeroed.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B)
{
  return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
					       (__v16sf) __B,
					       (__v16sf)
					       _mm512_setzero_ps (),
					       (__mmask16) __U);
}
528 
/* Bitwise AND of packed double-precision vectors __A and __B.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_and_pd (__m512d __A, __m512d __B)
{
  return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
						 (__v8df) __B,
						 (__v8df)
						 _mm512_setzero_pd (),
						 (__mmask8) -1);
}

/* Masked AND: elements where __U is clear are taken from __W.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_and_pd (__m512d __W, __mmask8 __U, __m512d __A,
		    __m512d __B)
{
  return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
						 (__v8df) __B,
						 (__v8df) __W,
						 (__mmask8) __U);
}

/* Zero-masked AND: elements where __U is clear are zeroed.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_and_pd (__mmask8 __U, __m512d __A, __m512d __B)
{
  return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
						 (__v8df) __B,
						 (__v8df)
						 _mm512_setzero_pd (),
						 (__mmask8) __U);
}
561 
/* Bitwise AND of packed single-precision vectors __A and __B.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_and_ps (__m512 __A, __m512 __B)
{
  return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
						(__v16sf) __B,
						(__v16sf)
						_mm512_setzero_ps (),
						(__mmask16) -1);
}

/* Masked AND: elements where __U is clear are taken from __W.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
{
  return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
						(__v16sf) __B,
						(__v16sf) __W,
						(__mmask16) __U);
}

/* Zero-masked AND: elements where __U is clear are zeroed.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B)
{
  return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
						(__v16sf) __B,
						(__v16sf)
						_mm512_setzero_ps (),
						(__mmask16) __U);
}
593 
/* Bitwise AND of NOT __A with __B for packed double-precision
   vectors.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_andnot_pd (__m512d __A, __m512d __B)
{
  return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
						  (__v8df) __B,
						  (__v8df)
						  _mm512_setzero_pd (),
						  (__mmask8) -1);
}

/* Masked ANDNOT: elements where __U is clear are taken from __W.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_andnot_pd (__m512d __W, __mmask8 __U, __m512d __A,
		       __m512d __B)
{
  return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
						  (__v8df) __B,
						  (__v8df) __W,
						  (__mmask8) __U);
}

/* Zero-masked ANDNOT: elements where __U is clear are zeroed.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_andnot_pd (__mmask8 __U, __m512d __A, __m512d __B)
{
  return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
						  (__v8df) __B,
						  (__v8df)
						  _mm512_setzero_pd (),
						  (__mmask8) __U);
}
626 
/* Bitwise AND of NOT __A with __B for packed single-precision
   vectors.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_andnot_ps (__m512 __A, __m512 __B)
{
  return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
						 (__v16sf) __B,
						 (__v16sf)
						 _mm512_setzero_ps (),
						 (__mmask16) -1);
}

/* Masked ANDNOT: elements where __U is clear are taken from __W.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A,
		       __m512 __B)
{
  return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
						 (__v16sf) __B,
						 (__v16sf) __W,
						 (__mmask16) __U);
}

/* Zero-masked ANDNOT: elements where __U is clear are zeroed.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B)
{
  return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
						 (__v16sf) __B,
						 (__v16sf)
						 _mm512_setzero_ps (),
						 (__mmask16) __U);
}
659 
/* VPMOVD2M: build a 16-bit mask from the most-significant bit of each
   32-bit element of __A.  */
extern __inline __mmask16
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_movepi32_mask (__m512i __A)
{
  return (__mmask16) __builtin_ia32_cvtd2mask512 ((__v16si) __A);
}

/* VPMOVQ2M: build an 8-bit mask from the most-significant bit of each
   64-bit element of __A.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_movepi64_mask (__m512i __A)
{
  return (__mmask8) __builtin_ia32_cvtq2mask512 ((__v8di) __A);
}

/* VPMOVM2D: expand mask __A so each set bit becomes an all-ones 32-bit
   element.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_movm_epi32 (__mmask16 __A)
{
  return (__m512i) __builtin_ia32_cvtmask2d512 (__A);
}

/* VPMOVM2Q: expand mask __A so each set bit becomes an all-ones 64-bit
   element.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_movm_epi64 (__mmask8 __A)
{
  return (__m512i) __builtin_ia32_cvtmask2q512 (__A);
}
687 
/* Convert packed doubles in __A to signed 64-bit integers with
   truncation.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvttpd_epi64 (__m512d __A)
{
  return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) -1,
						     _MM_FROUND_CUR_DIRECTION);
}

/* Masked truncating convert: elements where __U is clear come from
   __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvttpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A)
{
  return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
						     (__v8di) __W,
						     (__mmask8) __U,
						     _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masked truncating convert: elements where __U is clear are
   zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A)
{
  return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) __U,
						     _MM_FROUND_CUR_DIRECTION);
}
719 
/* Convert packed doubles in __A to unsigned 64-bit integers with
   truncation.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvttpd_epu64 (__m512d __A)
{
  return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
						      (__v8di)
						      _mm512_setzero_si512 (),
						      (__mmask8) -1,
						      _MM_FROUND_CUR_DIRECTION);
}

/* Masked truncating convert: elements where __U is clear come from
   __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvttpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A)
{
  return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
						      (__v8di) __W,
						      (__mmask8) __U,
						      _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masked truncating convert: elements where __U is clear are
   zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A)
{
  return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
						      (__v8di)
						      _mm512_setzero_si512 (),
						      (__mmask8) __U,
						      _MM_FROUND_CUR_DIRECTION);
}
751 
/* Convert eight packed floats in __A to signed 64-bit integers with
   truncation.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvttps_epi64 (__m256 __A)
{
  return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) -1,
						     _MM_FROUND_CUR_DIRECTION);
}

/* Masked truncating convert: elements where __U is clear come from
   __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvttps_epi64 (__m512i __W, __mmask8 __U, __m256 __A)
{
  return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
						     (__v8di) __W,
						     (__mmask8) __U,
						     _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masked truncating convert: elements where __U is clear are
   zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A)
{
  return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) __U,
						     _MM_FROUND_CUR_DIRECTION);
}
783 
/* Convert eight packed floats in __A to unsigned 64-bit integers with
   truncation.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvttps_epu64 (__m256 __A)
{
  return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
						      (__v8di)
						      _mm512_setzero_si512 (),
						      (__mmask8) -1,
						      _MM_FROUND_CUR_DIRECTION);
}

/* Masked truncating convert: elements where __U is clear come from
   __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvttps_epu64 (__m512i __W, __mmask8 __U, __m256 __A)
{
  return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
						      (__v8di) __W,
						      (__mmask8) __U,
						      _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masked truncating convert: elements where __U is clear are
   zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A)
{
  return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
						      (__v8di)
						      _mm512_setzero_si512 (),
						      (__mmask8) __U,
						      _MM_FROUND_CUR_DIRECTION);
}
815 
/* Convert packed doubles in __A to signed 64-bit integers using the
   current rounding direction.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtpd_epi64 (__m512d __A)
{
  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
						    (__v8di)
						    _mm512_setzero_si512 (),
						    (__mmask8) -1,
						    _MM_FROUND_CUR_DIRECTION);
}

/* Masked convert: elements where __U is clear come from __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A)
{
  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
						    (__v8di) __W,
						    (__mmask8) __U,
						    _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masked convert: elements where __U is clear are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A)
{
  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
						    (__v8di)
						    _mm512_setzero_si512 (),
						    (__mmask8) __U,
						    _MM_FROUND_CUR_DIRECTION);
}
847 
/* Convert the eight DP floats in __A to unsigned 64-bit integers using
   the current rounding direction.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtpd_epu64 (__m512d __A)
{
  return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) -1,
						     _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A)
{
  return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
						     (__v8di) __W,
						     (__mmask8) __U,
						     _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A)
{
  return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) __U,
						     _MM_FROUND_CUR_DIRECTION);
}
879 
/* Convert the eight SP floats in __A (256-bit source) to signed 64-bit
   integers using the current rounding direction.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtps_epi64 (__m256 __A)
{
  return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
						    (__v8di)
						    _mm512_setzero_si512 (),
						    (__mmask8) -1,
						    _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtps_epi64 (__m512i __W, __mmask8 __U, __m256 __A)
{
  return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
						    (__v8di) __W,
						    (__mmask8) __U,
						    _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A)
{
  return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
						    (__v8di)
						    _mm512_setzero_si512 (),
						    (__mmask8) __U,
						    _MM_FROUND_CUR_DIRECTION);
}
911 
/* Convert the eight SP floats in __A (256-bit source) to unsigned
   64-bit integers using the current rounding direction.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtps_epu64 (__m256 __A)
{
  return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) -1,
						     _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtps_epu64 (__m512i __W, __mmask8 __U, __m256 __A)
{
  return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
						     (__v8di) __W,
						     (__mmask8) __U,
						     _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A)
{
  return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) __U,
						     _MM_FROUND_CUR_DIRECTION);
}
943 
/* Convert the eight signed 64-bit integers in __A to SP floats
   (256-bit result) using the current rounding direction.  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtepi64_ps (__m512i __A)
{
  return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
						   (__v8sf)
						   _mm256_setzero_ps (),
						   (__mmask8) -1,
						   _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtepi64_ps (__m256 __W, __mmask8 __U, __m512i __A)
{
  return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
						   (__v8sf) __W,
						   (__mmask8) __U,
						   _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A)
{
  return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
						   (__v8sf)
						   _mm256_setzero_ps (),
						   (__mmask8) __U,
						   _MM_FROUND_CUR_DIRECTION);
}
975 
/* Convert the eight unsigned 64-bit integers in __A to SP floats
   (256-bit result) using the current rounding direction.  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtepu64_ps (__m512i __A)
{
  return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
						    (__v8sf)
						    _mm256_setzero_ps (),
						    (__mmask8) -1,
						    _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtepu64_ps (__m256 __W, __mmask8 __U, __m512i __A)
{
  return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
						    (__v8sf) __W,
						    (__mmask8) __U,
						    _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A)
{
  return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
						    (__v8sf)
						    _mm256_setzero_ps (),
						    (__mmask8) __U,
						    _MM_FROUND_CUR_DIRECTION);
}
1007 
/* Convert the eight signed 64-bit integers in __A to DP floats using
   the current rounding direction.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtepi64_pd (__m512i __A)
{
  return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
						    (__v8df)
						    _mm512_setzero_pd (),
						    (__mmask8) -1,
						    _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtepi64_pd (__m512d __W, __mmask8 __U, __m512i __A)
{
  return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
						    (__v8df) __W,
						    (__mmask8) __U,
						    _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A)
{
  return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
						    (__v8df)
						    _mm512_setzero_pd (),
						    (__mmask8) __U,
						    _MM_FROUND_CUR_DIRECTION);
}
1039 
/* Convert the eight unsigned 64-bit integers in __A to DP floats using
   the current rounding direction.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtepu64_pd (__m512i __A)
{
  return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
						     (__v8df)
						     _mm512_setzero_pd (),
						     (__mmask8) -1,
						     _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtepu64_pd (__m512d __W, __mmask8 __U, __m512i __A)
{
  return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
						     (__v8df) __W,
						     (__mmask8) __U,
						     _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A)
{
  return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
						     (__v8df)
						     _mm512_setzero_pd (),
						     (__mmask8) __U,
						     _MM_FROUND_CUR_DIRECTION);
}
1071 
1072 #ifdef __OPTIMIZE__
1073 extern __inline __mmask8
1074 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_kshiftli_mask8(__mmask8 __A,unsigned int __B)1075 _kshiftli_mask8 (__mmask8 __A, unsigned int __B)
1076 {
1077   return (__mmask8) __builtin_ia32_kshiftliqi ((__mmask8) __A, (__mmask8) __B);
1078 }
1079 
1080 extern __inline __mmask8
1081 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_kshiftri_mask8(__mmask8 __A,unsigned int __B)1082 _kshiftri_mask8 (__mmask8 __A, unsigned int __B)
1083 {
1084   return (__mmask8) __builtin_ia32_kshiftriqi ((__mmask8) __A, (__mmask8) __B);
1085 }
1086 
/* VRANGEPD: per-element range operation on __A/__B; the operation
   (min/max and sign-control) is selected by the immediate __C.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_range_pd (__m512d __A, __m512d __B, int __C)
{
  return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A,
						   (__v8df) __B, __C,
						   (__v8df)
						   _mm512_setzero_pd (),
						   (__mmask8) -1,
						   _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_range_pd (__m512d __W, __mmask8 __U,
		      __m512d __A, __m512d __B, int __C)
{
  return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A,
						   (__v8df) __B, __C,
						   (__v8df) __W,
						   (__mmask8) __U,
						   _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_range_pd (__mmask8 __U, __m512d __A, __m512d __B, int __C)
{
  return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A,
						   (__v8df) __B, __C,
						   (__v8df)
						   _mm512_setzero_pd (),
						   (__mmask8) __U,
						   _MM_FROUND_CUR_DIRECTION);
}
1122 
/* VRANGEPS: per-element range operation on the sixteen SP elements of
   __A/__B; operation selected by the immediate __C.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_range_ps (__m512 __A, __m512 __B, int __C)
{
  return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A,
						  (__v16sf) __B, __C,
						  (__v16sf)
						  _mm512_setzero_ps (),
						  (__mmask16) -1,
						  _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_range_ps (__m512 __W, __mmask16 __U,
		      __m512 __A, __m512 __B, int __C)
{
  return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A,
						  (__v16sf) __B, __C,
						  (__v16sf) __W,
						  (__mmask16) __U,
						  _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_range_ps (__mmask16 __U, __m512 __A, __m512 __B, int __C)
{
  return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A,
						  (__v16sf) __B, __C,
						  (__v16sf)
						  _mm512_setzero_ps (),
						  (__mmask16) __U,
						  _MM_FROUND_CUR_DIRECTION);
}
1158 
/* VREDUCESD: reduce the low DP element of __B per immediate __C
   (upper element taken from __A).  */
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_reduce_sd (__m128d __A, __m128d __B, int __C)
{
  return (__m128d) __builtin_ia32_reducesd_mask ((__v2df) __A,
						 (__v2df) __B, __C,
						 (__v2df) _mm_setzero_pd (),
						 (__mmask8) -1);
}

/* Merge-masking variant: low element comes from __W when bit 0 of __U
   is clear.  */
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_reduce_sd (__m128d __W,  __mmask8 __U, __m128d __A,
		    __m128d __B, int __C)
{
  return (__m128d) __builtin_ia32_reducesd_mask ((__v2df) __A,
						 (__v2df) __B, __C,
						 (__v2df) __W,
						 (__mmask8) __U);
}

/* Zero-masking variant: low element zeroed when bit 0 of __U is
   clear.  */
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_reduce_sd (__mmask8 __U, __m128d __A, __m128d __B, int __C)
{
  return (__m128d) __builtin_ia32_reducesd_mask ((__v2df) __A,
						 (__v2df) __B, __C,
						 (__v2df) _mm_setzero_pd (),
						 (__mmask8) __U);
}
1189 
/* VREDUCESS: reduce the low SP element of __B per immediate __C
   (upper elements taken from __A).  */
extern __inline __m128
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_reduce_ss (__m128 __A, __m128 __B, int __C)
{
  return (__m128) __builtin_ia32_reducess_mask ((__v4sf) __A,
						(__v4sf) __B, __C,
						(__v4sf) _mm_setzero_ps (),
						(__mmask8) -1);
}


/* Merge-masking variant: low element comes from __W when bit 0 of __U
   is clear.  */
extern __inline __m128
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_reduce_ss (__m128 __W,  __mmask8 __U, __m128 __A,
		    __m128 __B, int __C)
{
  return (__m128) __builtin_ia32_reducess_mask ((__v4sf) __A,
						(__v4sf) __B, __C,
						(__v4sf) __W,
						(__mmask8) __U);
}

/* Zero-masking variant: low element zeroed when bit 0 of __U is
   clear.  */
extern __inline __m128
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_reduce_ss (__mmask8 __U, __m128 __A, __m128 __B, int __C)
{
  return (__m128) __builtin_ia32_reducess_mask ((__v4sf) __A,
						(__v4sf) __B, __C,
						(__v4sf) _mm_setzero_ps (),
						(__mmask8) __U);
}
1221 
/* VRANGESD: range operation on the low DP elements of __A/__B per
   immediate __C, current rounding direction.  */
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_range_sd (__m128d __A, __m128d __B, int __C)
{
  return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
						   (__v2df) __B, __C,
						   (__v2df)
						   _mm_setzero_pd (),
						   (__mmask8) -1,
						   _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masking variant: low element comes from __W when bit 0 of __U
   is clear.  */
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_range_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, int __C)
{
  return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
						   (__v2df) __B, __C,
						   (__v2df) __W,
						   (__mmask8) __U,
						   _MM_FROUND_CUR_DIRECTION);
}

/* Zero-masking variant: low element zeroed when bit 0 of __U is
   clear.  */
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_range_sd (__mmask8 __U, __m128d __A, __m128d __B, int __C)
{
  return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
						   (__v2df) __B, __C,
						   (__v2df)
						   _mm_setzero_pd (),
						   (__mmask8) __U,
						   _MM_FROUND_CUR_DIRECTION);
}
1256 
/* VRANGESS: range operation on the low SP elements of __A/__B per
   immediate __C, current rounding direction.  */
extern __inline __m128
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_range_ss (__m128 __A, __m128 __B, int __C)
{
  return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
						  (__v4sf) __B, __C,
						  (__v4sf)
						  _mm_setzero_ps (),
						  (__mmask8) -1,
						  _MM_FROUND_CUR_DIRECTION);
}

/* Merge-masking variant: low element comes from __W when bit 0 of __U
   is clear.  */
extern __inline __m128
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_range_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, int __C)
{
  return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
						  (__v4sf) __B, __C,
						  (__v4sf) __W,
						  (__mmask8) __U,
						  _MM_FROUND_CUR_DIRECTION);
}


/* Zero-masking variant: low element zeroed when bit 0 of __U is
   clear.  */
extern __inline __m128
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_range_ss (__mmask8 __U, __m128 __A, __m128 __B, int __C)
{
  return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
						  (__v4sf) __B, __C,
						  (__v4sf)
						  _mm_setzero_ps (),
						  (__mmask8) __U,
						  _MM_FROUND_CUR_DIRECTION);
}
1292 
/* VRANGESD with explicit rounding: like _mm_range_sd, but the rounding
   mode is supplied via the immediate __R.  */
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_range_round_sd (__m128d __A, __m128d __B, int __C, const int __R)
{
  return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
						   (__v2df) __B, __C,
						   (__v2df)
						   _mm_setzero_pd (),
						   (__mmask8) -1, __R);
}

/* Merge-masking variant: low element comes from __W when bit 0 of __U
   is clear.  */
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_range_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B,
			 int __C, const int __R)
{
  return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
						   (__v2df) __B, __C,
						   (__v2df) __W,
						   (__mmask8) __U, __R);
}

/* Zero-masking variant: low element zeroed when bit 0 of __U is
   clear.  */
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_range_round_sd (__mmask8 __U, __m128d __A, __m128d __B, int __C,
			  const int __R)
{
  return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A,
						   (__v2df) __B, __C,
						   (__v2df)
						   _mm_setzero_pd (),
						   (__mmask8) __U, __R);
}
1326 
/* VRANGESS with explicit rounding: like _mm_range_ss, but the rounding
   mode is supplied via the immediate __R.  */
extern __inline __m128
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_range_round_ss (__m128 __A, __m128 __B, int __C, const int __R)
{
  return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
						  (__v4sf) __B, __C,
						  (__v4sf)
						  _mm_setzero_ps (),
						  (__mmask8) -1, __R);
}

/* Merge-masking variant: low element comes from __W when bit 0 of __U
   is clear.  */
extern __inline __m128
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_range_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
			 int __C, const int __R)
{
  return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
						  (__v4sf) __B, __C,
						  (__v4sf) __W,
						  (__mmask8) __U, __R);
}

/* Zero-masking variant: low element zeroed when bit 0 of __U is
   clear.  */
extern __inline __m128
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_range_round_ss (__mmask8 __U, __m128 __A, __m128 __B, int __C,
			  const int __R)
{
  return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A,
						  (__v4sf) __B, __C,
						  (__v4sf)
						  _mm_setzero_ps (),
						  (__mmask8) __U, __R);
}
1360 
/* VFPCLASSSS: test the low SP element of __A against the FP categories
   selected by the immediate __imm; result is a 1-bit mask.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_fpclass_ss_mask (__m128 __A, const int __imm)
{
  return (__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) __A, __imm,
						   (__mmask8) -1);
}

/* VFPCLASSSD: same classification test on the low DP element of __A.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_fpclass_sd_mask (__m128d __A, const int __imm)
{
  return (__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) __A, __imm,
						   (__mmask8) -1);
}

/* Masked VFPCLASSSS: the test result is ANDed with mask __U.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_fpclass_ss_mask (__mmask8 __U, __m128 __A, const int __imm)
{
  return (__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) __A, __imm, __U);
}

/* Masked VFPCLASSSD: the test result is ANDed with mask __U.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_fpclass_sd_mask (__mmask8 __U, __m128d __A, const int __imm)
{
  return (__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) __A, __imm, __U);
}
1390 
/* Truncating conversion of eight DP floats to signed 64-bit integers;
   __R supplies the SAE/rounding immediate.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtt_roundpd_epi64 (__m512d __A, const int __R)
{
  return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) -1,
						     __R);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtt_roundpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A,
				const int __R)
{
  return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
						     (__v8di) __W,
						     (__mmask8) __U,
						     __R);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvtt_roundpd_epi64 (__mmask8 __U, __m512d __A,
				 const int __R)
{
  return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) __U,
						     __R);
}
1424 
/* Truncating conversion of eight DP floats to unsigned 64-bit
   integers; __R supplies the SAE/rounding immediate.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtt_roundpd_epu64 (__m512d __A, const int __R)
{
  return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
						      (__v8di)
						      _mm512_setzero_si512 (),
						      (__mmask8) -1,
						      __R);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtt_roundpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A,
				const int __R)
{
  return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
						      (__v8di) __W,
						      (__mmask8) __U,
						      __R);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvtt_roundpd_epu64 (__mmask8 __U, __m512d __A,
				 const int __R)
{
  return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
						      (__v8di)
						      _mm512_setzero_si512 (),
						      (__mmask8) __U,
						      __R);
}
1458 
/* Truncating conversion of eight SP floats (256-bit source) to signed
   64-bit integers; __R supplies the SAE/rounding immediate.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtt_roundps_epi64 (__m256 __A, const int __R)
{
  return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) -1,
						     __R);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtt_roundps_epi64 (__m512i __W, __mmask8 __U, __m256 __A,
				const int __R)
{
  return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
						     (__v8di) __W,
						     (__mmask8) __U,
						     __R);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvtt_roundps_epi64 (__mmask8 __U, __m256 __A,
				 const int __R)
{
  return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) __U,
						     __R);
}
1492 
/* Truncating conversion of eight SP floats (256-bit source) to
   unsigned 64-bit integers; __R supplies the SAE/rounding immediate.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvtt_roundps_epu64 (__m256 __A, const int __R)
{
  return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
						      (__v8di)
						      _mm512_setzero_si512 (),
						      (__mmask8) -1,
						      __R);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvtt_roundps_epu64 (__m512i __W, __mmask8 __U, __m256 __A,
				const int __R)
{
  return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
						      (__v8di) __W,
						      (__mmask8) __U,
						      __R);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvtt_roundps_epu64 (__mmask8 __U, __m256 __A,
				 const int __R)
{
  return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
						      (__v8di)
						      _mm512_setzero_si512 (),
						      (__mmask8) __U,
						      __R);
}
1526 
/* Convert eight DP floats to signed 64-bit integers with the rounding
   mode given by the immediate __R.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvt_roundpd_epi64 (__m512d __A, const int __R)
{
  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
						    (__v8di)
						    _mm512_setzero_si512 (),
						    (__mmask8) -1,
						    __R);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvt_roundpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A,
			       const int __R)
{
  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
						    (__v8di) __W,
						    (__mmask8) __U,
						    __R);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvt_roundpd_epi64 (__mmask8 __U, __m512d __A,
				const int __R)
{
  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
						    (__v8di)
						    _mm512_setzero_si512 (),
						    (__mmask8) __U,
						    __R);
}
1560 
/* Convert eight DP floats to unsigned 64-bit integers with the
   rounding mode given by the immediate __R.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvt_roundpd_epu64 (__m512d __A, const int __R)
{
  return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) -1,
						     __R);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvt_roundpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A,
			       const int __R)
{
  return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
						     (__v8di) __W,
						     (__mmask8) __U,
						     __R);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvt_roundpd_epu64 (__mmask8 __U, __m512d __A,
				const int __R)
{
  return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) __U,
						     __R);
}
1594 
/* Convert eight SP floats (256-bit source) to signed 64-bit integers
   with the rounding mode given by the immediate __R.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvt_roundps_epi64 (__m256 __A, const int __R)
{
  return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
						    (__v8di)
						    _mm512_setzero_si512 (),
						    (__mmask8) -1,
						    __R);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvt_roundps_epi64 (__m512i __W, __mmask8 __U, __m256 __A,
			       const int __R)
{
  return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
						    (__v8di) __W,
						    (__mmask8) __U,
						    __R);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvt_roundps_epi64 (__mmask8 __U, __m256 __A,
				const int __R)
{
  return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
						    (__v8di)
						    _mm512_setzero_si512 (),
						    (__mmask8) __U,
						    __R);
}
1628 
/* Convert eight SP floats (256-bit source) to unsigned 64-bit integers
   with the rounding mode given by the immediate __R.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvt_roundps_epu64 (__m256 __A, const int __R)
{
  return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) -1,
						     __R);
}

/* Merge-masking variant: elements not selected by __U come from __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvt_roundps_epu64 (__m512i __W, __mmask8 __U, __m256 __A,
			       const int __R)
{
  return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
						     (__v8di) __W,
						     (__mmask8) __U,
						     __R);
}

/* Zero-masking variant: elements not selected by __U are zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvt_roundps_epu64 (__mmask8 __U, __m256 __A,
				const int __R)
{
  return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
						     (__v8di)
						     _mm512_setzero_si512 (),
						     (__mmask8) __U,
						     __R);
}
1662 
/* Convert eight packed signed 64-bit integers in __A to packed
   single-precision (32-bit) floating-point elements, using the
   rounding-mode immediate __R (VCVTQQ2PS).  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvt_roundepi64_ps (__m512i __A, const int __R)
{
  return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
						   (__v8sf)
						   _mm256_setzero_ps (),
						   (__mmask8) -1,
						   __R);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvt_roundepi64_ps (__m256 __W, __mmask8 __U, __m512i __A,
			       const int __R)
{
  return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
						   (__v8sf) __W,
						   (__mmask8) __U,
						   __R);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvt_roundepi64_ps (__mmask8 __U, __m512i __A,
				const int __R)
{
  return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
						   (__v8sf)
						   _mm256_setzero_ps (),
						   (__mmask8) __U,
						   __R);
}
1696 
/* Convert eight packed unsigned 64-bit integers in __A to packed
   single-precision (32-bit) floating-point elements, using the
   rounding-mode immediate __R (VCVTUQQ2PS).  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvt_roundepu64_ps (__m512i __A, const int __R)
{
  return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
						    (__v8sf)
						    _mm256_setzero_ps (),
						    (__mmask8) -1,
						    __R);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvt_roundepu64_ps (__m256 __W, __mmask8 __U, __m512i __A,
			       const int __R)
{
  return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
						    (__v8sf) __W,
						    (__mmask8) __U,
						    __R);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvt_roundepu64_ps (__mmask8 __U, __m512i __A,
				const int __R)
{
  return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
						    (__v8sf)
						    _mm256_setzero_ps (),
						    (__mmask8) __U,
						    __R);
}
1730 
/* Convert eight packed signed 64-bit integers in __A to packed
   double-precision (64-bit) floating-point elements, using the
   rounding-mode immediate __R (VCVTQQ2PD).  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvt_roundepi64_pd (__m512i __A, const int __R)
{
  return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
						    (__v8df)
						    _mm512_setzero_pd (),
						    (__mmask8) -1,
						    __R);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvt_roundepi64_pd (__m512d __W, __mmask8 __U, __m512i __A,
			       const int __R)
{
  return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
						    (__v8df) __W,
						    (__mmask8) __U,
						    __R);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvt_roundepi64_pd (__mmask8 __U, __m512i __A,
				const int __R)
{
  return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
						    (__v8df)
						    _mm512_setzero_pd (),
						    (__mmask8) __U,
						    __R);
}
1764 
/* Convert eight packed unsigned 64-bit integers in __A to packed
   double-precision (64-bit) floating-point elements, using the
   rounding-mode immediate __R (VCVTUQQ2PD).  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_cvt_roundepu64_pd (__m512i __A, const int __R)
{
  return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
						     (__v8df)
						     _mm512_setzero_pd (),
						     (__mmask8) -1,
						     __R);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_cvt_roundepu64_pd (__m512d __W, __mmask8 __U, __m512i __A,
			       const int __R)
{
  return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
						     (__v8df) __W,
						     (__mmask8) __U,
						     __R);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_cvt_roundepu64_pd (__mmask8 __U, __m512i __A,
				const int __R)
{
  return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
						     (__v8df)
						     _mm512_setzero_pd (),
						     (__mmask8) __U,
						     __R);
}
1798 
/* Perform the reduction transformation (VREDUCEPD) on each packed
   double-precision element of __A, controlled by the immediate __B,
   which encodes the number of fraction bits to keep and the rounding
   behavior.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_reduce_pd (__m512d __A, int __B)
{
  return (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B,
						    (__v8df)
						    _mm512_setzero_pd (),
						    (__mmask8) -1);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_reduce_pd (__m512d __W, __mmask8 __U, __m512d __A, int __B)
{
  return (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B,
						    (__v8df) __W,
						    (__mmask8) __U);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_reduce_pd (__mmask8 __U, __m512d __A, int __B)
{
  return (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B,
						    (__v8df)
						    _mm512_setzero_pd (),
						    (__mmask8) __U);
}
1827 
/* Perform the reduction transformation (VREDUCEPS) on each packed
   single-precision element of __A, controlled by the immediate __B,
   which encodes the number of fraction bits to keep and the rounding
   behavior.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_reduce_ps (__m512 __A, int __B)
{
  return (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B,
						   (__v16sf)
						   _mm512_setzero_ps (),
						   (__mmask16) -1);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_reduce_ps (__m512 __W, __mmask16 __U, __m512 __A, int __B)
{
  return (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B,
						   (__v16sf) __W,
						   (__mmask16) __U);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_reduce_ps (__mmask16 __U, __m512 __A, int __B)
{
  return (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B,
						   (__v16sf)
						   _mm512_setzero_ps (),
						   (__mmask16) __U);
}
1856 
/* Extract the 256-bit lane (eight single-precision elements) of __A
   selected by the immediate __imm (VEXTRACTF32X8).  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_extractf32x8_ps (__m512 __A, const int __imm)
{
  return (__m256) __builtin_ia32_extractf32x8_mask ((__v16sf) __A,
						    __imm,
						    (__v8sf)
						    _mm256_setzero_ps (),
						    (__mmask8) -1);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_extractf32x8_ps (__m256 __W, __mmask8 __U, __m512 __A,
			     const int __imm)
{
  return (__m256) __builtin_ia32_extractf32x8_mask ((__v16sf) __A,
						    __imm,
						    (__v8sf) __W,
						    (__mmask8) __U);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_extractf32x8_ps (__mmask8 __U, __m512 __A,
			      const int __imm)
{
  return (__m256) __builtin_ia32_extractf32x8_mask ((__v16sf) __A,
						    __imm,
						    (__v8sf)
						    _mm256_setzero_ps (),
						    (__mmask8) __U);
}
1890 
/* Extract the 128-bit lane (two double-precision elements) of __A
   selected by the immediate __imm (VEXTRACTF64X2).  */
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_extractf64x2_pd (__m512d __A, const int __imm)
{
  return (__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df) __A,
							 __imm,
							 (__v2df)
							 _mm_setzero_pd (),
							 (__mmask8) -1);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_extractf64x2_pd (__m128d __W, __mmask8 __U, __m512d __A,
			     const int __imm)
{
  return (__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df) __A,
							 __imm,
							 (__v2df) __W,
							 (__mmask8)
							 __U);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m128d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_extractf64x2_pd (__mmask8 __U, __m512d __A,
			      const int __imm)
{
  return (__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df) __A,
							 __imm,
							 (__v2df)
							 _mm_setzero_pd (),
							 (__mmask8)
							 __U);
}
1926 
/* Extract the 256-bit lane (eight 32-bit integer elements) of __A
   selected by the immediate __imm (VEXTRACTI32X8).  */
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_extracti32x8_epi32 (__m512i __A, const int __imm)
{
  return (__m256i) __builtin_ia32_extracti32x8_mask ((__v16si) __A,
						     __imm,
						     (__v8si)
						     _mm256_setzero_si256 (),
						     (__mmask8) -1);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_extracti32x8_epi32 (__m256i __W, __mmask8 __U, __m512i __A,
				const int __imm)
{
  return (__m256i) __builtin_ia32_extracti32x8_mask ((__v16si) __A,
						     __imm,
						     (__v8si) __W,
						     (__mmask8) __U);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_extracti32x8_epi32 (__mmask8 __U, __m512i __A,
				 const int __imm)
{
  return (__m256i) __builtin_ia32_extracti32x8_mask ((__v16si) __A,
						     __imm,
						     (__v8si)
						     _mm256_setzero_si256 (),
						     (__mmask8) __U);
}
1960 
/* Extract the 128-bit lane (two 64-bit integer elements) of __A
   selected by the immediate __imm (VEXTRACTI64X2).  */
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_extracti64x2_epi64 (__m512i __A, const int __imm)
{
  return (__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di) __A,
							 __imm,
							 (__v2di)
							 _mm_setzero_si128 (),
							 (__mmask8) -1);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_extracti64x2_epi64 (__m128i __W, __mmask8 __U, __m512i __A,
				const int __imm)
{
  return (__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di) __A,
							 __imm,
							 (__v2di) __W,
							 (__mmask8)
							 __U);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_extracti64x2_epi64 (__mmask8 __U, __m512i __A,
				 const int __imm)
{
  return (__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di) __A,
							 __imm,
							 (__v2di)
							 _mm_setzero_si128 (),
							 (__mmask8)
							 __U);
}
1996 
/* Perform the range operation (VRANGEPD) on corresponding
   double-precision elements of __A and __B.  The immediate __C
   selects the operation (min/max variants and sign control); __R is
   the rounding/SAE immediate.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_range_round_pd (__m512d __A, __m512d __B, int __C,
		       const int __R)
{
  return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A,
						   (__v8df) __B, __C,
						   (__v8df)
						   _mm512_setzero_pd (),
						   (__mmask8) -1,
						   __R);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_range_round_pd (__m512d __W, __mmask8 __U,
			    __m512d __A, __m512d __B, int __C,
			    const int __R)
{
  return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A,
						   (__v8df) __B, __C,
						   (__v8df) __W,
						   (__mmask8) __U,
						   __R);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_range_round_pd (__mmask8 __U, __m512d __A, __m512d __B,
			     int __C, const int __R)
{
  return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A,
						   (__v8df) __B, __C,
						   (__v8df)
						   _mm512_setzero_pd (),
						   (__mmask8) __U,
						   __R);
}
2035 
/* Perform the range operation (VRANGEPS) on corresponding
   single-precision elements of __A and __B.  The immediate __C
   selects the operation (min/max variants and sign control); __R is
   the rounding/SAE immediate.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_range_round_ps (__m512 __A, __m512 __B, int __C, const int __R)
{
  return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A,
						  (__v16sf) __B, __C,
						  (__v16sf)
						  _mm512_setzero_ps (),
						  (__mmask16) -1,
						  __R);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_range_round_ps (__m512 __W, __mmask16 __U,
			    __m512 __A, __m512 __B, int __C,
			    const int __R)
{
  return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A,
						  (__v16sf) __B, __C,
						  (__v16sf) __W,
						  (__mmask16) __U,
						  __R);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_range_round_ps (__mmask16 __U, __m512 __A, __m512 __B,
			     int __C, const int __R)
{
  return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A,
						  (__v16sf) __B, __C,
						  (__v16sf)
						  _mm512_setzero_ps (),
						  (__mmask16) __U,
						  __R);
}
2073 
/* Copy __A and replace its 256-bit lane selected by the immediate
   __imm with the eight 32-bit integer elements of __B
   (VINSERTI32X8).  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_inserti32x8 (__m512i __A, __m256i __B, const int __imm)
{
  return (__m512i) __builtin_ia32_inserti32x8_mask ((__v16si) __A,
						    (__v8si) __B,
						    __imm,
						    (__v16si)
						    _mm512_setzero_si512 (),
						    (__mmask16) -1);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_inserti32x8 (__m512i __W, __mmask16 __U, __m512i __A,
			 __m256i __B, const int __imm)
{
  return (__m512i) __builtin_ia32_inserti32x8_mask ((__v16si) __A,
						    (__v8si) __B,
						    __imm,
						    (__v16si) __W,
						    (__mmask16) __U);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_inserti32x8 (__mmask16 __U, __m512i __A, __m256i __B,
			  const int __imm)
{
  return (__m512i) __builtin_ia32_inserti32x8_mask ((__v16si) __A,
						    (__v8si) __B,
						    __imm,
						    (__v16si)
						    _mm512_setzero_si512 (),
						    (__mmask16) __U);
}
2110 
/* Copy __A and replace its 256-bit lane selected by the immediate
   __imm with the eight single-precision elements of __B
   (VINSERTF32X8).  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_insertf32x8 (__m512 __A, __m256 __B, const int __imm)
{
  return (__m512) __builtin_ia32_insertf32x8_mask ((__v16sf) __A,
						   (__v8sf) __B,
						   __imm,
						   (__v16sf)
						   _mm512_setzero_ps (),
						   (__mmask16) -1);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_insertf32x8 (__m512 __W, __mmask16 __U, __m512 __A,
			 __m256 __B, const int __imm)
{
  return (__m512) __builtin_ia32_insertf32x8_mask ((__v16sf) __A,
						   (__v8sf) __B,
						   __imm,
						   (__v16sf) __W,
						   (__mmask16) __U);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m512
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_insertf32x8 (__mmask16 __U, __m512 __A, __m256 __B,
			  const int __imm)
{
  return (__m512) __builtin_ia32_insertf32x8_mask ((__v16sf) __A,
						   (__v8sf) __B,
						   __imm,
						   (__v16sf)
						   _mm512_setzero_ps (),
						   (__mmask16) __U);
}
2147 
/* Copy __A and replace its 128-bit lane selected by the immediate
   __imm with the two 64-bit integer elements of __B
   (VINSERTI64X2).  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_inserti64x2 (__m512i __A, __m128i __B, const int __imm)
{
  return (__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di) __A,
							(__v2di) __B,
							__imm,
							(__v8di)
							_mm512_setzero_si512 (),
							(__mmask8) -1);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_inserti64x2 (__m512i __W, __mmask8 __U, __m512i __A,
			 __m128i __B, const int __imm)
{
  return (__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di) __A,
							(__v2di) __B,
							__imm,
							(__v8di) __W,
							(__mmask8)
							__U);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m512i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_inserti64x2 (__mmask8 __U, __m512i __A, __m128i __B,
			  const int __imm)
{
  return (__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di) __A,
							(__v2di) __B,
							__imm,
							(__v8di)
							_mm512_setzero_si512 (),
							(__mmask8)
							__U);
}
2186 
/* Copy __A and replace its 128-bit lane selected by the immediate
   __imm with the two double-precision elements of __B
   (VINSERTF64X2).  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_insertf64x2 (__m512d __A, __m128d __B, const int __imm)
{
  return (__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df) __A,
							(__v2df) __B,
							__imm,
							(__v8df)
							_mm512_setzero_pd (),
							(__mmask8) -1);
}

/* Merge-masking form: result elements whose bit in __U is clear are
   taken from __W.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_insertf64x2 (__m512d __W, __mmask8 __U, __m512d __A,
			 __m128d __B, const int __imm)
{
  return (__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df) __A,
							(__v2df) __B,
							__imm,
							(__v8df) __W,
							(__mmask8)
							__U);
}

/* Zero-masking form: result elements whose bit in __U is clear are
   zeroed.  */
extern __inline __m512d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_insertf64x2 (__mmask8 __U, __m512d __A, __m128d __B,
			  const int __imm)
{
  return (__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df) __A,
							(__v2df) __B,
							__imm,
							(__v8df)
							_mm512_setzero_pd (),
							(__mmask8)
							__U);
}
2225 
/* Test each double-precision element of __A for the special-value
   categories selected by the immediate __imm (VFPCLASSPD), producing
   a bitmask; the result is computed under writemask __U, so only
   elements whose bit in __U is set can yield a set bit.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_fpclass_pd_mask (__mmask8 __U, __m512d __A,
			     const int __imm)
{
  return (__mmask8) __builtin_ia32_fpclasspd512_mask ((__v8df) __A,
						      __imm, __U);
}

/* Unmasked form of the test above: all eight elements are
   classified.  */
extern __inline __mmask8
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_fpclass_pd_mask (__m512d __A, const int __imm)
{
  return (__mmask8) __builtin_ia32_fpclasspd512_mask ((__v8df) __A,
						      __imm,
						      (__mmask8) -1);
}

/* Test each single-precision element of __A for the special-value
   categories selected by the immediate __imm (VFPCLASSPS), under
   writemask __U.  */
extern __inline __mmask16
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_fpclass_ps_mask (__mmask16 __U, __m512 __A,
			     const int __imm)
{
  return (__mmask16) __builtin_ia32_fpclassps512_mask ((__v16sf) __A,
						       __imm, __U);
}

/* Unmasked form of the test above: all sixteen elements are
   classified.  */
extern __inline __mmask16
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm512_fpclass_ps_mask (__m512 __A, const int __imm)
{
  return (__mmask16) __builtin_ia32_fpclassps512_mask ((__v16sf) __A,
						       __imm,
						       (__mmask16) -1);
}
2261 
2262 #else
/* Macro forms of the mask-register shift intrinsics (this is the
   alternate branch of a conditional that begins before this excerpt;
   these expand directly to the builtins so the shift count stays an
   immediate).  Shift the 8-bit mask X left/right by Y bits.  */
#define _kshiftli_mask8(X, Y)						\
  ((__mmask8) __builtin_ia32_kshiftliqi ((__mmask8)(X), (__mmask8)(Y)))

#define _kshiftri_mask8(X, Y)						\
  ((__mmask8) __builtin_ia32_kshiftriqi ((__mmask8)(X), (__mmask8)(Y)))
2268 
/* Scalar range operation (VRANGESD/VRANGESS) on the low element of A
   and B, with the operation selected by the immediate C; the upper
   elements are copied from A.  The _round variants additionally take
   a rounding/SAE immediate R, while the plain forms use
   _MM_FROUND_CUR_DIRECTION.  Mask/maskz variants apply merge- and
   zero-masking to the low element under U.  */
#define _mm_range_sd(A, B, C)						 \
  ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
    (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), 	 \
    (__mmask8) -1, _MM_FROUND_CUR_DIRECTION))

#define _mm_mask_range_sd(W, U, A, B, C)				 \
  ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
    (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), 		 \
    (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))

#define _mm_maskz_range_sd(U, A, B, C)					 \
  ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
    (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), 	 \
    (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))

#define _mm_range_ss(A, B, C)						\
  ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A),	\
    (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (),		\
    (__mmask8) -1, _MM_FROUND_CUR_DIRECTION))

#define _mm_mask_range_ss(W, U, A, B, C)				\
  ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A),	\
    (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W),			\
    (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))

#define _mm_maskz_range_ss(U, A, B, C)					\
  ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A),	\
    (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (),		\
    (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))

#define _mm_range_round_sd(A, B, C, R)					 \
  ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
    (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (),		 \
    (__mmask8) -1, (R)))

#define _mm_mask_range_round_sd(W, U, A, B, C, R)			 \
  ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
    (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W),		 \
    (__mmask8)(U), (R)))

#define _mm_maskz_range_round_sd(U, A, B, C, R)				 \
  ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \
    (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (),		 \
    (__mmask8)(U), (R)))

#define _mm_range_round_ss(A, B, C, R)					\
  ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A),	\
    (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (),		\
    (__mmask8) -1, (R)))

#define _mm_mask_range_round_ss(W, U, A, B, C, R)			\
  ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A),	\
    (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W),			\
    (__mmask8)(U), (R)))

#define _mm_maskz_range_round_ss(U, A, B, C, R)				\
  ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A),	\
    (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (),		\
    (__mmask8)(U), (R)))
2328 
/* Macro forms of the float-to-64-bit-integer conversion intrinsics
   defined inline above: cvtt = convert with truncation, cvt = convert
   using the rounding immediate B.  A is the source vector; W/U supply
   the merge destination and writemask, and the maskz forms pass a
   zero vector so masked-off elements are zeroed.  */
#define _mm512_cvtt_roundpd_epi64(A, B)		    \
  ((__m512i)__builtin_ia32_cvttpd2qq512_mask ((A), (__v8di)		\
					      _mm512_setzero_si512 (),	\
					      -1, (B)))

#define _mm512_mask_cvtt_roundpd_epi64(W, U, A, B)  \
    ((__m512i)__builtin_ia32_cvttpd2qq512_mask ((A), (__v8di)(W), (U), (B)))

#define _mm512_maskz_cvtt_roundpd_epi64(U, A, B)    \
    ((__m512i)__builtin_ia32_cvttpd2qq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))

#define _mm512_cvtt_roundpd_epu64(A, B)		    \
    ((__m512i)__builtin_ia32_cvttpd2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))

#define _mm512_mask_cvtt_roundpd_epu64(W, U, A, B)  \
    ((__m512i)__builtin_ia32_cvttpd2uqq512_mask ((A), (__v8di)(W), (U), (B)))

#define _mm512_maskz_cvtt_roundpd_epu64(U, A, B)    \
    ((__m512i)__builtin_ia32_cvttpd2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))

#define _mm512_cvtt_roundps_epi64(A, B)		    \
    ((__m512i)__builtin_ia32_cvttps2qq512_mask ((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))

#define _mm512_mask_cvtt_roundps_epi64(W, U, A, B)  \
    ((__m512i)__builtin_ia32_cvttps2qq512_mask ((A), (__v8di)(W), (U), (B)))

#define _mm512_maskz_cvtt_roundps_epi64(U, A, B)    \
    ((__m512i)__builtin_ia32_cvttps2qq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))

#define _mm512_cvtt_roundps_epu64(A, B)		    \
    ((__m512i)__builtin_ia32_cvttps2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))

#define _mm512_mask_cvtt_roundps_epu64(W, U, A, B)  \
    ((__m512i)__builtin_ia32_cvttps2uqq512_mask ((A), (__v8di)(W), (U), (B)))

#define _mm512_maskz_cvtt_roundps_epu64(U, A, B)    \
    ((__m512i)__builtin_ia32_cvttps2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))

#define _mm512_cvt_roundpd_epi64(A, B)		    \
    ((__m512i)__builtin_ia32_cvtpd2qq512_mask ((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))

#define _mm512_mask_cvt_roundpd_epi64(W, U, A, B)   \
    ((__m512i)__builtin_ia32_cvtpd2qq512_mask ((A), (__v8di)(W), (U), (B)))

#define _mm512_maskz_cvt_roundpd_epi64(U, A, B)     \
    ((__m512i)__builtin_ia32_cvtpd2qq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))

#define _mm512_cvt_roundpd_epu64(A, B)		    \
    ((__m512i)__builtin_ia32_cvtpd2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))

#define _mm512_mask_cvt_roundpd_epu64(W, U, A, B)   \
    ((__m512i)__builtin_ia32_cvtpd2uqq512_mask ((A), (__v8di)(W), (U), (B)))

#define _mm512_maskz_cvt_roundpd_epu64(U, A, B)     \
    ((__m512i)__builtin_ia32_cvtpd2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))

#define _mm512_cvt_roundps_epi64(A, B)		    \
    ((__m512i)__builtin_ia32_cvtps2qq512_mask ((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))

#define _mm512_mask_cvt_roundps_epi64(W, U, A, B)   \
    ((__m512i)__builtin_ia32_cvtps2qq512_mask ((A), (__v8di)(W), (U), (B)))

#define _mm512_maskz_cvt_roundps_epi64(U, A, B)     \
    ((__m512i)__builtin_ia32_cvtps2qq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))

#define _mm512_cvt_roundps_epu64(A, B)		    \
    ((__m512i)__builtin_ia32_cvtps2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))

#define _mm512_mask_cvt_roundps_epu64(W, U, A, B)   \
    ((__m512i)__builtin_ia32_cvtps2uqq512_mask ((A), (__v8di)(W), (U), (B)))

#define _mm512_maskz_cvt_roundps_epu64(U, A, B)     \
    ((__m512i)__builtin_ia32_cvtps2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))
2402 
/* Macro forms of the 64-bit-integer-to-float conversion intrinsics
   defined inline above (signed qq / unsigned uqq sources, ps/pd
   destinations), using the rounding immediate B.  W passes through
   unconverted since __m256/__m512d convert implicitly to the
   builtin's vector parameter type.  */
#define _mm512_cvt_roundepi64_ps(A, B)		    \
    ((__m256)__builtin_ia32_cvtqq2ps512_mask ((__v8di)(A), (__v8sf)_mm256_setzero_ps (), -1, (B)))

#define _mm512_mask_cvt_roundepi64_ps(W, U, A, B)   \
    ((__m256)__builtin_ia32_cvtqq2ps512_mask ((__v8di)(A), (W), (U), (B)))

#define _mm512_maskz_cvt_roundepi64_ps(U, A, B)     \
    ((__m256)__builtin_ia32_cvtqq2ps512_mask ((__v8di)(A), (__v8sf)_mm256_setzero_ps (), (U), (B)))

#define _mm512_cvt_roundepu64_ps(A, B)		    \
    ((__m256)__builtin_ia32_cvtuqq2ps512_mask ((__v8di)(A), (__v8sf)_mm256_setzero_ps (), -1, (B)))

#define _mm512_mask_cvt_roundepu64_ps(W, U, A, B)   \
    ((__m256)__builtin_ia32_cvtuqq2ps512_mask ((__v8di)(A), (W), (U), (B)))

#define _mm512_maskz_cvt_roundepu64_ps(U, A, B)     \
    ((__m256)__builtin_ia32_cvtuqq2ps512_mask ((__v8di)(A), (__v8sf)_mm256_setzero_ps (), (U), (B)))

#define _mm512_cvt_roundepi64_pd(A, B)		    \
    ((__m512d)__builtin_ia32_cvtqq2pd512_mask ((__v8di)(A), (__v8df)_mm512_setzero_pd (), -1, (B)))

#define _mm512_mask_cvt_roundepi64_pd(W, U, A, B)   \
    ((__m512d)__builtin_ia32_cvtqq2pd512_mask ((__v8di)(A), (W), (U), (B)))

#define _mm512_maskz_cvt_roundepi64_pd(U, A, B)     \
    ((__m512d)__builtin_ia32_cvtqq2pd512_mask ((__v8di)(A), (__v8df)_mm512_setzero_pd (), (U), (B)))

#define _mm512_cvt_roundepu64_pd(A, B)		    \
    ((__m512d)__builtin_ia32_cvtuqq2pd512_mask ((__v8di)(A), (__v8df)_mm512_setzero_pd (), -1, (B)))

#define _mm512_mask_cvt_roundepu64_pd(W, U, A, B)   \
    ((__m512d)__builtin_ia32_cvtuqq2pd512_mask ((__v8di)(A), (W), (U), (B)))

#define _mm512_maskz_cvt_roundepu64_pd(U, A, B)     \
    ((__m512d)__builtin_ia32_cvtuqq2pd512_mask ((__v8di)(A), (__v8df)_mm512_setzero_pd (), (U), (B)))
2438 
/* VREDUCEPD/VREDUCEPS: compute per element the fractional remainder
   after rounding to a multiple of 2^-M, where M and the rounding mode
   are encoded in the immediate B.  */
#define _mm512_reduce_pd(A, B)						\
  ((__m512d) __builtin_ia32_reducepd512_mask ((__v8df)(__m512d)(A),	\
    (int)(B), (__v8df)_mm512_setzero_pd (), (__mmask8)-1))

#define _mm512_mask_reduce_pd(W, U, A, B)				\
  ((__m512d) __builtin_ia32_reducepd512_mask ((__v8df)(__m512d)(A),	\
    (int)(B), (__v8df)(__m512d)(W), (__mmask8)(U)))

#define _mm512_maskz_reduce_pd(U, A, B)					\
  ((__m512d) __builtin_ia32_reducepd512_mask ((__v8df)(__m512d)(A),	\
    (int)(B), (__v8df)_mm512_setzero_pd (), (__mmask8)(U)))

#define _mm512_reduce_ps(A, B)						\
  ((__m512) __builtin_ia32_reduceps512_mask ((__v16sf)(__m512)(A),	\
    (int)(B), (__v16sf)_mm512_setzero_ps (), (__mmask16)-1))

#define _mm512_mask_reduce_ps(W, U, A, B)				\
  ((__m512) __builtin_ia32_reduceps512_mask ((__v16sf)(__m512)(A),	\
    (int)(B), (__v16sf)(__m512)(W), (__mmask16)(U)))

#define _mm512_maskz_reduce_ps(U, A, B)					\
  ((__m512) __builtin_ia32_reduceps512_mask ((__v16sf)(__m512)(A),	\
    (int)(B), (__v16sf)_mm512_setzero_ps (), (__mmask16)(U)))
2462 
/* Extract a 256-bit (32x8) or 128-bit (64x2) lane, selected by the
   immediate C, from a 512-bit float/double/integer vector.  */
#define _mm512_extractf32x8_ps(X, C)                                    \
  ((__m256) __builtin_ia32_extractf32x8_mask ((__v16sf)(__m512) (X),    \
    (int) (C), (__v8sf)(__m256) _mm256_setzero_ps (), (__mmask8)-1))

#define _mm512_mask_extractf32x8_ps(W, U, X, C)                         \
  ((__m256) __builtin_ia32_extractf32x8_mask ((__v16sf)(__m512) (X),    \
    (int) (C), (__v8sf)(__m256) (W), (__mmask8) (U)))

#define _mm512_maskz_extractf32x8_ps(U, X, C)                           \
  ((__m256) __builtin_ia32_extractf32x8_mask ((__v16sf)(__m512) (X),    \
    (int) (C), (__v8sf)(__m256) _mm256_setzero_ps (), (__mmask8) (U)))

#define _mm512_extractf64x2_pd(X, C)                                    \
  ((__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df)(__m512d) (X),\
    (int) (C), (__v2df)(__m128d) _mm_setzero_pd (), (__mmask8)-1))

#define _mm512_mask_extractf64x2_pd(W, U, X, C)                         \
  ((__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df)(__m512d) (X),\
    (int) (C), (__v2df)(__m128d) (W), (__mmask8) (U)))

#define _mm512_maskz_extractf64x2_pd(U, X, C)                           \
  ((__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df)(__m512d) (X),\
    (int) (C), (__v2df)(__m128d) _mm_setzero_pd (), (__mmask8) (U)))

#define _mm512_extracti32x8_epi32(X, C)                                 \
  ((__m256i) __builtin_ia32_extracti32x8_mask ((__v16si)(__m512i) (X),  \
    (int) (C), (__v8si)(__m256i) _mm256_setzero_si256 (), (__mmask8)-1))

#define _mm512_mask_extracti32x8_epi32(W, U, X, C)                      \
  ((__m256i) __builtin_ia32_extracti32x8_mask ((__v16si)(__m512i) (X),  \
    (int) (C), (__v8si)(__m256i) (W), (__mmask8) (U)))

#define _mm512_maskz_extracti32x8_epi32(U, X, C)                        \
  ((__m256i) __builtin_ia32_extracti32x8_mask ((__v16si)(__m512i) (X),  \
    (int) (C), (__v8si)(__m256i) _mm256_setzero_si256 (), (__mmask8) (U)))

#define _mm512_extracti64x2_epi64(X, C)                                 \
  ((__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di)(__m512i) (X),\
    (int) (C), (__v2di)(__m128i) _mm_setzero_si128 (), (__mmask8)-1))

#define _mm512_mask_extracti64x2_epi64(W, U, X, C)                      \
  ((__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di)(__m512i) (X),\
    (int) (C), (__v2di)(__m128i) (W), (__mmask8) (U)))

#define _mm512_maskz_extracti64x2_epi64(U, X, C)                        \
  ((__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di)(__m512i) (X),\
    (int) (C), (__v2di)(__m128i) _mm_setzero_si128 (), (__mmask8) (U)))
2510 
/* VRANGEPD/VRANGEPS: per-element min/max/abs-min/abs-max with sign
   control, selected by the immediate C.  The plain forms use
   _MM_FROUND_CUR_DIRECTION; the _round forms take an explicit
   suppress-all-exceptions/rounding argument R.  */
#define _mm512_range_pd(A, B, C)					\
  ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A),	\
    (__v8df)(__m512d)(B), (int)(C),					\
    (__v8df)_mm512_setzero_pd (), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION))

#define _mm512_mask_range_pd(W, U, A, B, C)				\
  ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A),	\
    (__v8df)(__m512d)(B), (int)(C),					\
    (__v8df)(__m512d)(W), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))

#define _mm512_maskz_range_pd(U, A, B, C)				\
  ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A),	\
    (__v8df)(__m512d)(B), (int)(C),					\
    (__v8df)_mm512_setzero_pd (), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))

#define _mm512_range_ps(A, B, C)					\
  ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A),	\
    (__v16sf)(__m512)(B), (int)(C),					\
    (__v16sf)_mm512_setzero_ps (), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION))

#define _mm512_mask_range_ps(W, U, A, B, C)				\
  ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A),	\
    (__v16sf)(__m512)(B), (int)(C),					\
    (__v16sf)(__m512)(W), (__mmask16)(U), _MM_FROUND_CUR_DIRECTION))

#define _mm512_maskz_range_ps(U, A, B, C)				\
  ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A),	\
    (__v16sf)(__m512)(B), (int)(C),					\
    (__v16sf)_mm512_setzero_ps (), (__mmask16)(U), _MM_FROUND_CUR_DIRECTION))

#define _mm512_range_round_pd(A, B, C, R)					\
  ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A),	\
    (__v8df)(__m512d)(B), (int)(C),					\
    (__v8df)_mm512_setzero_pd (), (__mmask8)-1, (R)))

#define _mm512_mask_range_round_pd(W, U, A, B, C, R)				\
  ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A),	\
    (__v8df)(__m512d)(B), (int)(C),					\
    (__v8df)(__m512d)(W), (__mmask8)(U), (R)))

#define _mm512_maskz_range_round_pd(U, A, B, C, R)				\
  ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A),	\
    (__v8df)(__m512d)(B), (int)(C),					\
    (__v8df)_mm512_setzero_pd (), (__mmask8)(U), (R)))

#define _mm512_range_round_ps(A, B, C, R)					\
  ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A),	\
    (__v16sf)(__m512)(B), (int)(C),					\
    (__v16sf)_mm512_setzero_ps (), (__mmask16)-1, (R)))

#define _mm512_mask_range_round_ps(W, U, A, B, C, R)				\
  ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A),	\
    (__v16sf)(__m512)(B), (int)(C),					\
    (__v16sf)(__m512)(W), (__mmask16)(U), (R)))

#define _mm512_maskz_range_round_ps(U, A, B, C, R)				\
  ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A),	\
    (__v16sf)(__m512)(B), (int)(C),					\
    (__v16sf)_mm512_setzero_ps (), (__mmask16)(U), (R)))
2570 
/* Insert a 128-bit (64x2) or 256-bit (32x8) vector Y into the 512-bit
   vector X at the lane selected by the immediate C.  Note the unmasked
   insert*64x2 forms pass X itself as the (irrelevant) passthrough
   operand, while the insert*32x8 forms pass a zero vector; with a -1
   mask the passthrough is never used, so both are equivalent.  */
#define _mm512_insertf64x2(X, Y, C)                                     \
  ((__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df)(__m512d) (X),\
    (__v2df)(__m128d) (Y), (int) (C), (__v8df)(__m512d) (X),            \
    (__mmask8)-1))

#define _mm512_mask_insertf64x2(W, U, X, Y, C)                          \
  ((__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df)(__m512d) (X),\
    (__v2df)(__m128d) (Y), (int) (C), (__v8df)(__m512d) (W),            \
    (__mmask8) (U)))

#define _mm512_maskz_insertf64x2(U, X, Y, C)                            \
  ((__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df)(__m512d) (X),\
    (__v2df)(__m128d) (Y), (int) (C),                                   \
    (__v8df)(__m512d) _mm512_setzero_pd (), (__mmask8) (U)))

#define _mm512_inserti64x2(X, Y, C)                                     \
  ((__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di)(__m512i) (X),\
    (__v2di)(__m128i) (Y), (int) (C), (__v8di)(__m512i) (X), (__mmask8)-1))

#define _mm512_mask_inserti64x2(W, U, X, Y, C)                          \
  ((__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di)(__m512i) (X),\
    (__v2di)(__m128i) (Y), (int) (C), (__v8di)(__m512i) (W),            \
    (__mmask8) (U)))

#define _mm512_maskz_inserti64x2(U, X, Y, C)                            \
  ((__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di)(__m512i) (X),\
    (__v2di)(__m128i) (Y), (int) (C),                                   \
    (__v8di)(__m512i) _mm512_setzero_si512 (), (__mmask8) (U)))

#define _mm512_insertf32x8(X, Y, C)                                     \
  ((__m512) __builtin_ia32_insertf32x8_mask ((__v16sf)(__m512) (X),     \
    (__v8sf)(__m256) (Y), (int) (C),\
    (__v16sf)(__m512)_mm512_setzero_ps (),\
    (__mmask16)-1))

#define _mm512_mask_insertf32x8(W, U, X, Y, C)                          \
  ((__m512) __builtin_ia32_insertf32x8_mask ((__v16sf)(__m512) (X),     \
    (__v8sf)(__m256) (Y), (int) (C),\
    (__v16sf)(__m512)(W),\
    (__mmask16)(U)))

#define _mm512_maskz_insertf32x8(U, X, Y, C)                            \
  ((__m512) __builtin_ia32_insertf32x8_mask ((__v16sf)(__m512) (X),     \
    (__v8sf)(__m256) (Y), (int) (C),\
    (__v16sf)(__m512)_mm512_setzero_ps (),\
    (__mmask16)(U)))

#define _mm512_inserti32x8(X, Y, C)                                     \
  ((__m512i) __builtin_ia32_inserti32x8_mask ((__v16si)(__m512i) (X),   \
    (__v8si)(__m256i) (Y), (int) (C),\
    (__v16si)(__m512i)_mm512_setzero_si512 (),\
    (__mmask16)-1))

#define _mm512_mask_inserti32x8(W, U, X, Y, C)                          \
  ((__m512i) __builtin_ia32_inserti32x8_mask ((__v16si)(__m512i) (X),   \
    (__v8si)(__m256i) (Y), (int) (C),\
    (__v16si)(__m512i)(W),\
    (__mmask16)(U)))

#define _mm512_maskz_inserti32x8(U, X, Y, C)                            \
  ((__m512i) __builtin_ia32_inserti32x8_mask ((__v16si)(__m512i) (X),   \
    (__v8si)(__m256i) (Y), (int) (C),\
    (__v16si)(__m512i)_mm512_setzero_si512 (),\
    (__mmask16)(U)))
2635 
/* VFPCLASSSS/VFPCLASSSD: test the low element of X against the
   floating-point categories selected by the immediate C (QNaN, zero,
   denormal, infinity, negative, SNaN); bit 0 of the returned mask is
   the outcome.  The stray line-continuation backslashes that followed
   the first two macro bodies have been removed: they spliced the next
   (blank) line into the macro, which is harmless today but would
   silently absorb any text later placed on that line.  */
#define _mm_fpclass_ss_mask(X, C)					\
  ((__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) (__m128) (X),	\
					     (int) (C), (__mmask8) (-1)))

#define _mm_fpclass_sd_mask(X, C)					\
  ((__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) (__m128d) (X),	\
					     (int) (C), (__mmask8) (-1)))

/* As above, but the classification result is ANDed with mask U.  */
#define _mm_mask_fpclass_ss_mask(X, C, U)				\
  ((__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) (__m128) (X),	\
					     (int) (C), (__mmask8) (U)))

#define _mm_mask_fpclass_sd_mask(X, C, U)				\
  ((__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) (__m128d) (X),	\
					     (int) (C), (__mmask8) (U)))
2651 
/* VFPCLASSPD/VFPCLASSPS over a full 512-bit vector: classify each
   element against the categories in the immediate; the mask forms AND
   the per-element result with the supplied mask.  The ps forms operate
   on 16 elements, so their mask operand must be cast to __mmask16 —
   the previous (__mmask8) casts truncated the upper 8 bits of the
   user's mask in _mm512_mask_fpclass_ps_mask (see GCC PR target/93673).  */
#define _mm512_mask_fpclass_pd_mask(u, X, C)                            \
  ((__mmask8) __builtin_ia32_fpclasspd512_mask ((__v8df) (__m512d) (X), \
						(int) (C), (__mmask8)(u)))

#define _mm512_mask_fpclass_ps_mask(u, x, c)				\
  ((__mmask16) __builtin_ia32_fpclassps512_mask ((__v16sf) (__m512) (x),\
						 (int) (c),(__mmask16)(u)))

#define _mm512_fpclass_pd_mask(X, C)                                    \
  ((__mmask8) __builtin_ia32_fpclasspd512_mask ((__v8df) (__m512d) (X), \
						(int) (C), (__mmask8)-1))

#define _mm512_fpclass_ps_mask(x, c)                                    \
  ((__mmask16) __builtin_ia32_fpclassps512_mask ((__v16sf) (__m512) (x),\
						 (int) (c),(__mmask16)-1))
2667 
/* Scalar VREDUCESD/VREDUCESS: apply the reduce operation (immediate C)
   to the low element of B, copying the upper elements from A.  */
#define _mm_reduce_sd(A, B, C)						\
  ((__m128d) __builtin_ia32_reducesd_mask ((__v2df)(__m128d)(A),	\
    (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (),		\
    (__mmask8)-1))

#define _mm_mask_reduce_sd(W, U, A, B, C)				\
  ((__m128d) __builtin_ia32_reducesd_mask ((__v2df)(__m128d)(A),	\
    (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), (__mmask8)(U)))

#define _mm_maskz_reduce_sd(U, A, B, C)					\
  ((__m128d) __builtin_ia32_reducesd_mask ((__v2df)(__m128d)(A),	\
    (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (),		\
    (__mmask8)(U)))

#define _mm_reduce_ss(A, B, C)						\
  ((__m128) __builtin_ia32_reducess_mask ((__v4sf)(__m128)(A),		\
    (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (),		\
    (__mmask8)-1))

#define _mm_mask_reduce_ss(W, U, A, B, C)				\
  ((__m128) __builtin_ia32_reducess_mask ((__v4sf)(__m128)(A),		\
    (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), (__mmask8)(U)))

#define _mm_maskz_reduce_ss(U, A, B, C)					\
  ((__m128) __builtin_ia32_reducess_mask ((__v4sf)(__m128)(A),		\
    (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (),		\
    (__mmask8)(U)))
2695 
2696 
2697 
2698 #endif
2699 
2700 #ifdef __DISABLE_AVX512DQ__
2701 #undef __DISABLE_AVX512DQ__
2702 #pragma GCC pop_options
2703 #endif /* __DISABLE_AVX512DQ__ */
2704 
2705 #endif /* _AVX512DQINTRIN_H_INCLUDED */
2706