/* Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 8.0.  */

#ifndef _EMMINTRIN_H_INCLUDED
#define _EMMINTRIN_H_INCLUDED

#ifdef __SSE2__
#include <xmmintrin.h>

/* SSE2 */
typedef double __v2df __attribute__ ((__vector_size__ (16)));
typedef long long __v2di __attribute__ ((__vector_size__ (16)));
typedef int __v4si __attribute__ ((__vector_size__ (16)));
typedef short __v8hi __attribute__ ((__vector_size__ (16)));
typedef char __v16qi __attribute__ ((__vector_size__ (16)));

typedef __v2di __m128i;
typedef __v2df __m128d;

/* Create a selector for use with the SHUFPD instruction.  */
#define _MM_SHUFFLE2(fp1,fp0) \
 (((fp1) << 1) | (fp0))
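
/* Illustrative sketch (not part of the header): with _mm_shuffle_pd
   defined further below, _MM_SHUFFLE2 (0, 1) == 1 selects element 1 of
   the first operand and element 0 of the second, so for hypothetical
   vectors __a and __b:
     __m128d __r = _mm_shuffle_pd (__a, __b, _MM_SHUFFLE2 (0, 1));
   yields __r = { __a[1], __b[0] }.  */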

/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_set_sd (double __F)
{
  return __extension__ (__m128d){ __F, 0 };
}

/* Create a vector with both elements equal to F.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_set1_pd (double __F)
{
  return __extension__ (__m128d){ __F, __F };
}

static __inline __m128d __attribute__((__always_inline__))
_mm_set_pd1 (double __F)
{
  return _mm_set1_pd (__F);
}

/* Create a vector with the lower value X and upper value W.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_set_pd (double __W, double __X)
{
  return __extension__ (__m128d){ __X, __W };
}

/* Create a vector with the lower value W and upper value X.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_setr_pd (double __W, double __X)
{
  return __extension__ (__m128d){ __W, __X };
}

/* Create a vector of zeros.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_setzero_pd (void)
{
  return __extension__ (__m128d){ 0.0, 0.0 };
}
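
/* A minimal sketch of the ordering assumed by the constructors above
   (hypothetical values): _mm_set_pd takes arguments high element first,
   _mm_setr_pd low element first.
     __m128d __v = _mm_set_pd (2.0, 1.0);    =>  __v = { 1.0, 2.0 }
     __m128d __w = _mm_setr_pd (1.0, 2.0);   =>  __w = { 1.0, 2.0 }
   */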

/* Sets the low DPFP value of A from the low value of B.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_move_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
}

/* Load two DPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_load_pd (double const *__P)
{
  return *(__m128d *)__P;
}

/* Load two DPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_loadu_pd (double const *__P)
{
  return __builtin_ia32_loadupd (__P);
}

/* Create a vector with both elements equal to *P.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_load1_pd (double const *__P)
{
  return _mm_set1_pd (*__P);
}

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_load_sd (double const *__P)
{
  return _mm_set_sd (*__P);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_load_pd1 (double const *__P)
{
  return _mm_load1_pd (__P);
}

/* Load two DPFP values in reverse order.  The address must be aligned.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_loadr_pd (double const *__P)
{
  __m128d __tmp = _mm_load_pd (__P);
  return __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,1));
}
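
/* Usage sketch for the loads above, with a caller-supplied buffer
   (names hypothetical):
     double __buf[2] __attribute__ ((aligned (16))) = { 1.0, 2.0 };
     __m128d __a = _mm_load_pd (__buf);    =>  { 1.0, 2.0 }, aligned only
     __m128d __u = _mm_loadu_pd (__buf);   =>  { 1.0, 2.0 }, any alignment
     __m128d __r = _mm_loadr_pd (__buf);   =>  { 2.0, 1.0 }
   */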

/* Store two DPFP values.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_store_pd (double *__P, __m128d __A)
{
  *(__m128d *)__P = __A;
}

/* Store two DPFP values.  The address need not be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storeu_pd (double *__P, __m128d __A)
{
  __builtin_ia32_storeupd (__P, __A);
}

/* Stores the lower DPFP value.  */
static __inline void __attribute__((__always_inline__))
_mm_store_sd (double *__P, __m128d __A)
{
  *__P = __builtin_ia32_vec_ext_v2df (__A, 0);
}

static __inline void __attribute__((__always_inline__))
_mm_storel_pd (double *__P, __m128d __A)
{
  _mm_store_sd (__P, __A);
}

/* Stores the upper DPFP value.  */
static __inline void __attribute__((__always_inline__))
_mm_storeh_pd (double *__P, __m128d __A)
{
  *__P = __builtin_ia32_vec_ext_v2df (__A, 1);
}

/* Store the lower DPFP value across two words.
   The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_store1_pd (double *__P, __m128d __A)
{
  _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,0)));
}

static __inline void __attribute__((__always_inline__))
_mm_store_pd1 (double *__P, __m128d __A)
{
  _mm_store1_pd (__P, __A);
}

/* Store two DPFP values in reverse order.  The address must be aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storer_pd (double *__P, __m128d __A)
{
  _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,1)));
}
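
/* The stores mirror the loads; a hedged sketch with assumed values:
     double __out[2] __attribute__ ((aligned (16)));
     _mm_store_pd (__out, __a);     stores { __a[0], __a[1] }
     _mm_storer_pd (__out, __a);    stores { __a[1], __a[0] }
     _mm_store1_pd (__out, __a);    stores { __a[0], __a[0] }
   */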

static __inline int __attribute__((__always_inline__))
_mm_cvtsi128_si32 (__m128i __A)
{
  return __builtin_ia32_vec_ext_v4si ((__v4si)__A, 0);
}

#ifdef __x86_64__
static __inline long long __attribute__((__always_inline__))
_mm_cvtsi128_si64x (__m128i __A)
{
  return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0);
}
#endif

static __inline __m128d __attribute__((__always_inline__))
_mm_add_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_addpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_add_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_addsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_sub_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_subpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_sub_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_subsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_mul_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_mulpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_mul_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_mulsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_div_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_divpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_div_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_divsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_sqrt_pd (__m128d __A)
{
  return (__m128d)__builtin_ia32_sqrtpd ((__v2df)__A);
}

/* Return pair {sqrt (B[0]), A[1]}.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_sqrt_sd (__m128d __A, __m128d __B)
{
  __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
  return (__m128d)__builtin_ia32_sqrtsd ((__v2df)__tmp);
}
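
/* For example (values assumed for illustration):
     __m128d __a = _mm_set_pd (3.0, 1.0);    =>  { 1.0, 3.0 }
     __m128d __b = _mm_set_sd (4.0);         =>  { 4.0, 0.0 }
     __m128d __r = _mm_sqrt_sd (__a, __b);   =>  { 2.0, 3.0 }
   */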

static __inline __m128d __attribute__((__always_inline__))
_mm_min_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_minpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_min_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_minsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_max_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_maxpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_max_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_maxsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_and_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_andpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_andnot_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_andnpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_or_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_orpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_xor_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_xorpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpeq_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpeqpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmplt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpltpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmple_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmplepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpgt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpgtpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpge_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpgepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpneq_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpneqpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnlt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnltpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnle_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnlepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpngt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpngtpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnge_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpngepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpord_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpordpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpunord_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpunordpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpeq_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpeqsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmplt_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpltsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmple_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmplesd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpgt_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
					 (__v2df)
					 __builtin_ia32_cmpltsd ((__v2df) __B,
								 (__v2df)
								 __A));
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpge_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
					 (__v2df)
					 __builtin_ia32_cmplesd ((__v2df) __B,
								 (__v2df)
								 __A));
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpneq_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpneqsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnlt_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnltsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnle_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnlesd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpngt_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
					 (__v2df)
					 __builtin_ia32_cmpnltsd ((__v2df) __B,
								  (__v2df)
								  __A));
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnge_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
					 (__v2df)
					 __builtin_ia32_cmpnlesd ((__v2df) __B,
								  (__v2df)
								  __A));
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpord_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpordsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpunord_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpunordsd ((__v2df)__A, (__v2df)__B);
}
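
/* The packed and scalar compares above return lane masks rather than
   booleans: a satisfied lane is all ones, a failing lane all zeros.
   A sketch of the usual branch-free select built from them (variables
   hypothetical):
     __m128d __m = _mm_cmplt_pd (__a, __b);
     __m128d __min = _mm_or_pd (_mm_and_pd (__m, __a),
				_mm_andnot_pd (__m, __b));
   */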

static __inline int __attribute__((__always_inline__))
_mm_comieq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comilt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comile_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comigt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdgt ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comige_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdge ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comineq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdneq ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomieq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdeq ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomilt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdlt ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomile_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdle ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomigt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdgt ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomige_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdge ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomineq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B);
}
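
/* Both comparison families above test only the low doubles and return
   0 or 1.  The comis* forms signal an invalid-operation exception for
   QNaN operands; the ucomis* forms are quiet.  E.g.
   _mm_comilt_sd (__a, __b) is nonzero exactly when __a[0] < __b[0]
   (ordered; variables hypothetical).  */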

/* Create a vector of Qi, where i is the element number.  */

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi64x (long long __q1, long long __q0)
{
  return __extension__ (__m128i)(__v2di){ __q0, __q1 };
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi64 (__m64 __q1, __m64 __q0)
{
  return _mm_set_epi64x ((long long)__q1, (long long)__q0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
{
  return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 };
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
	       short __q3, short __q2, short __q1, short __q0)
{
  return __extension__ (__m128i)(__v8hi){
    __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 };
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
	      char __q11, char __q10, char __q09, char __q08,
	      char __q07, char __q06, char __q05, char __q04,
	      char __q03, char __q02, char __q01, char __q00)
{
  return __extension__ (__m128i)(__v16qi){
    __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
    __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15
  };
}

/* Set all of the elements of the vector to A.  */

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi64x (long long __A)
{
  return _mm_set_epi64x (__A, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi64 (__m64 __A)
{
  return _mm_set_epi64 (__A, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi32 (int __A)
{
  return _mm_set_epi32 (__A, __A, __A, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi16 (short __A)
{
  return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi8 (char __A)
{
  return _mm_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A,
		       __A, __A, __A, __A, __A, __A, __A, __A);
}

/* Create a vector of Qi, where i is the element number.
   The parameter order is reversed from the _mm_set_epi* functions.  */

static __inline __m128i __attribute__((__always_inline__))
_mm_setr_epi64 (__m64 __q0, __m64 __q1)
{
  return _mm_set_epi64 (__q1, __q0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
{
  return _mm_set_epi32 (__q3, __q2, __q1, __q0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
	        short __q4, short __q5, short __q6, short __q7)
{
  return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
	       char __q04, char __q05, char __q06, char __q07,
	       char __q08, char __q09, char __q10, char __q11,
	       char __q12, char __q13, char __q14, char __q15)
{
  return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
		       __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
}
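
/* Sketch of the two orderings (values assumed):
     __m128i __x = _mm_set_epi32 (3, 2, 1, 0);    elements { 0, 1, 2, 3 }
     __m128i __y = _mm_setr_epi32 (0, 1, 2, 3);   the same vector
   */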

/* Load 128 bits of integer data.  The address must be 16-byte aligned.  */

static __inline __m128i __attribute__((__always_inline__))
_mm_load_si128 (__m128i const *__P)
{
  return *__P;
}

static __inline __m128i __attribute__((__always_inline__))
_mm_loadu_si128 (__m128i const *__P)
{
  return (__m128i) __builtin_ia32_loaddqu ((char const *)__P);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_loadl_epi64 (__m128i const *__P)
{
  return _mm_set_epi64 ((__m64)0LL, *(__m64 *)__P);
}

static __inline void __attribute__((__always_inline__))
_mm_store_si128 (__m128i *__P, __m128i __B)
{
  *__P = __B;
}

static __inline void __attribute__((__always_inline__))
_mm_storeu_si128 (__m128i *__P, __m128i __B)
{
  __builtin_ia32_storedqu ((char *)__P, (__v16qi)__B);
}

static __inline void __attribute__((__always_inline__))
_mm_storel_epi64 (__m128i *__P, __m128i __B)
{
  *(long long *)__P = __builtin_ia32_vec_ext_v2di ((__v2di)__B, 0);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_movepi64_pi64 (__m128i __B)
{
  return (__m64) __builtin_ia32_vec_ext_v2di ((__v2di)__B, 0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_movpi64_epi64 (__m64 __A)
{
  return _mm_set_epi64 ((__m64)0LL, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_move_epi64 (__m128i __A)
{
  return _mm_set_epi64 ((__m64)0LL, _mm_movepi64_pi64 (__A));
}

/* Create a vector of zeros.  */
static __inline __m128i __attribute__((__always_inline__))
_mm_setzero_si128 (void)
{
  return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 };
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtepi32_pd (__m128i __A)
{
  return (__m128d)__builtin_ia32_cvtdq2pd ((__v4si) __A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvtepi32_ps (__m128i __A)
{
  return (__m128)__builtin_ia32_cvtdq2ps ((__v4si) __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtpd_epi32 (__m128d __A)
{
  return (__m128i)__builtin_ia32_cvtpd2dq ((__v2df) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvtpd_pi32 (__m128d __A)
{
  return (__m64)__builtin_ia32_cvtpd2pi ((__v2df) __A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpd_ps (__m128d __A)
{
  return (__m128)__builtin_ia32_cvtpd2ps ((__v2df) __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvttpd_epi32 (__m128d __A)
{
  return (__m128i)__builtin_ia32_cvttpd2dq ((__v2df) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvttpd_pi32 (__m128d __A)
{
  return (__m64)__builtin_ia32_cvttpd2pi ((__v2df) __A);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtpi32_pd (__m64 __A)
{
  return (__m128d)__builtin_ia32_cvtpi2pd ((__v2si) __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtps_epi32 (__m128 __A)
{
  return (__m128i)__builtin_ia32_cvtps2dq ((__v4sf) __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvttps_epi32 (__m128 __A)
{
  return (__m128i)__builtin_ia32_cvttps2dq ((__v4sf) __A);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtps_pd (__m128 __A)
{
  return (__m128d)__builtin_ia32_cvtps2pd ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__))
_mm_cvtsd_si32 (__m128d __A)
{
  return __builtin_ia32_cvtsd2si ((__v2df) __A);
}

#ifdef __x86_64__
static __inline long long __attribute__((__always_inline__))
_mm_cvtsd_si64x (__m128d __A)
{
  return __builtin_ia32_cvtsd2si64 ((__v2df) __A);
}
#endif

static __inline int __attribute__((__always_inline__))
_mm_cvttsd_si32 (__m128d __A)
{
  return __builtin_ia32_cvttsd2si ((__v2df) __A);
}

#ifdef __x86_64__
static __inline long long __attribute__((__always_inline__))
_mm_cvttsd_si64x (__m128d __A)
{
  return __builtin_ia32_cvttsd2si64 ((__v2df) __A);
}
#endif

static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsd_ss (__m128 __A, __m128d __B)
{
  return (__m128)__builtin_ia32_cvtsd2ss ((__v4sf) __A, (__v2df) __B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtsi32_sd (__m128d __A, int __B)
{
  return (__m128d)__builtin_ia32_cvtsi2sd ((__v2df) __A, __B);
}

#ifdef __x86_64__
static __inline __m128d __attribute__((__always_inline__))
_mm_cvtsi64x_sd (__m128d __A, long long __B)
{
  return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
}
#endif

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtss_sd (__m128d __A, __m128 __B)
{
  return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B);
}

#define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, (__C)))
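
/* Bit 0 of the selector picks the element taken from __A, bit 1 the
   element taken from __B.  E.g. (illustrative only, variables assumed):
     _mm_shuffle_pd (__a, __b, _MM_SHUFFLE2 (1, 0))  =>  { __a[0], __b[1] }
   */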

static __inline __m128d __attribute__((__always_inline__))
_mm_unpackhi_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_unpckhpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_unpacklo_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_unpcklpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_loadh_pd (__m128d __A, double const *__B)
{
  return (__m128d)__builtin_ia32_loadhpd ((__v2df)__A, __B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_loadl_pd (__m128d __A, double const *__B)
{
  return (__m128d)__builtin_ia32_loadlpd ((__v2df)__A, __B);
}

static __inline int __attribute__((__always_inline__))
_mm_movemask_pd (__m128d __A)
{
  return __builtin_ia32_movmskpd ((__v2df)__A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_packs_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packsswb128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_packs_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packssdw128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_packus_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packuswb128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpackhi_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhbw128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpackhi_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpackhi_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhdq128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpackhi_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhqdq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpacklo_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklbw128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpacklo_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpacklo_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckldq128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpacklo_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklqdq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_add_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_add_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_add_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_add_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_adds_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddsb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_adds_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_adds_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddusb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_adds_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddusw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sub_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sub_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sub_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sub_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_subs_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubsb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_subs_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_subs_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubusb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_subs_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubusw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_madd_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaddwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_mulhi_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmulhw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_mullo_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmullw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_mul_su32 (__m64 __A, __m64 __B)
{
  return (__m64)__builtin_ia32_pmuludq ((__v2si)__A, (__v2si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_mul_epu32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmuludq128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_slli_epi16 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_slli_epi32 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_slli_epi64 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srai_epi16 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srai_epi32 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B);
}

#if 0
static __inline __m128i __attribute__((__always_inline__))
_mm_srli_si128 (__m128i __A, const int __B)
{
  return ((__m128i)__builtin_ia32_psrldqi128 (__A, (__B) * 8));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_slli_si128 (__m128i __A, const int __B)
{
  return ((__m128i)__builtin_ia32_pslldqi128 (__A, (__B) * 8));
}
#else
#define _mm_srli_si128(__A, __B) \
  ((__m128i)__builtin_ia32_psrldqi128 (__A, (__B) * 8))
#define _mm_slli_si128(__A, __B) \
  ((__m128i)__builtin_ia32_pslldqi128 (__A, (__B) * 8))
#endif
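
/* The count at this interface is in bytes; the underlying builtins take
   a bit count, hence the (__B) * 8 above.  For instance,
   _mm_srli_si128 (__v, 4) shifts the whole 128-bit value right by four
   bytes, so element 1 of a __v4si lands in element 0 and the top is
   zero-filled (illustrative description, __v assumed).  */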

static __inline __m128i __attribute__((__always_inline__))
_mm_srli_epi16 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srli_epi32 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srli_epi64 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sll_epi16 (__m128i __A, __m128i __B)
{
  return _mm_slli_epi16 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sll_epi32 (__m128i __A, __m128i __B)
{
  return _mm_slli_epi32 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sll_epi64 (__m128i __A, __m128i __B)
{
  return _mm_slli_epi64 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sra_epi16 (__m128i __A, __m128i __B)
{
  return _mm_srai_epi16 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sra_epi32 (__m128i __A, __m128i __B)
{
  return _mm_srai_epi32 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srl_epi16 (__m128i __A, __m128i __B)
{
  return _mm_srli_epi16 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srl_epi32 (__m128i __A, __m128i __B)
{
  return _mm_srli_epi32 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srl_epi64 (__m128i __A, __m128i __B)
{
  return _mm_srli_epi64 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_and_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pand128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_andnot_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pandn128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_or_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_por128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_xor_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pxor128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpeq_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpeq_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpeq_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmplt_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__B, (__v16qi)__A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmplt_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__B, (__v8hi)__A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmplt_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__B, (__v4si)__A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpgt_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpgt_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpgt_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B);
}

#if 0
static __inline int __attribute__((__always_inline__))
_mm_extract_epi16 (__m128i const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v8hi ((__v8hi)__A, __N);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
{
  return (__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)__A, __D, __N);
}
#else
#define _mm_extract_epi16(A, N) \
  ((int) __builtin_ia32_vec_ext_v8hi ((__v8hi)(A), (N)))
#define _mm_insert_epi16(A, D, N) \
  ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(A), (D), (N)))
#endif

static __inline __m128i __attribute__((__always_inline__))
_mm_max_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaxsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_max_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaxub128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_min_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pminsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_min_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pminub128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_movemask_epi8 (__m128i __A)
{
  return __builtin_ia32_pmovmskb128 ((__v16qi)__A);
}
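
/* A common idiom built from the byte compare and movemask above,
   locating a byte value in a 16-byte block (names hypothetical):
     __m128i __eq = _mm_cmpeq_epi8 (__block, _mm_set1_epi8 (__c));
     int __mask = _mm_movemask_epi8 (__eq);
   Bit i of __mask is set iff byte i matched; when __mask is nonzero the
   first match is at __builtin_ctz (__mask).  */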

static __inline __m128i __attribute__((__always_inline__))
_mm_mulhi_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B);
}

#define _mm_shufflehi_epi16(__A, __B) ((__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __B))
#define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __B))
#define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)__A, __B))
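
/* The selector uses the same two-bits-per-element encoding as
   _MM_SHUFFLE in xmmintrin.h.  For example (values assumed),
   _mm_shuffle_epi32 (__a, 0x1B) reverses the four 32-bit elements,
   since 0x1B == _MM_SHUFFLE (0, 1, 2, 3).  */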

static __inline void __attribute__((__always_inline__))
_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
{
  __builtin_ia32_maskmovdqu ((__v16qi)__A, (__v16qi)__B, __C);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_avg_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pavgb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_avg_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pavgw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sad_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psadbw128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline void __attribute__((__always_inline__))
_mm_stream_si32 (int *__A, int __B)
{
  __builtin_ia32_movnti (__A, __B);
}

static __inline void __attribute__((__always_inline__))
_mm_stream_si128 (__m128i *__A, __m128i __B)
{
  __builtin_ia32_movntdq ((__v2di *)__A, (__v2di)__B);
}

static __inline void __attribute__((__always_inline__))
_mm_stream_pd (double *__A, __m128d __B)
{
  __builtin_ia32_movntpd (__A, (__v2df)__B);
}

static __inline void __attribute__((__always_inline__))
_mm_clflush (void const *__A)
{
  __builtin_ia32_clflush (__A);
}

static __inline void __attribute__((__always_inline__))
_mm_lfence (void)
{
  __builtin_ia32_lfence ();
}

static __inline void __attribute__((__always_inline__))
_mm_mfence (void)
{
  __builtin_ia32_mfence ();
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtsi32_si128 (int __A)
{
  return _mm_set_epi32 (0, 0, 0, __A);
}

#ifdef __x86_64__
static __inline __m128i __attribute__((__always_inline__))
_mm_cvtsi64x_si128 (long long __A)
{
  return _mm_set_epi64x (0, __A);
}
#endif

/* Casts between various SP, DP, INT vector types.  Note that these do no
   conversion of values, they just change the type.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_castpd_ps(__m128d __A)
{
  return (__m128) __A;
}

static __inline __m128i __attribute__((__always_inline__))
_mm_castpd_si128(__m128d __A)
{
  return (__m128i) __A;
}

static __inline __m128d __attribute__((__always_inline__))
_mm_castps_pd(__m128 __A)
{
  return (__m128d) __A;
}

static __inline __m128i __attribute__((__always_inline__))
_mm_castps_si128(__m128 __A)
{
  return (__m128i) __A;
}

static __inline __m128 __attribute__((__always_inline__))
_mm_castsi128_ps(__m128i __A)
{
  return (__m128) __A;
}

static __inline __m128d __attribute__((__always_inline__))
_mm_castsi128_pd(__m128i __A)
{
  return (__m128d) __A;
}

#endif /* __SSE2__  */

#endif /* _EMMINTRIN_H_INCLUDED */