/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 8.0.  */

#ifndef _EMMINTRIN_H_INCLUDED
#define _EMMINTRIN_H_INCLUDED

#ifdef __SSE2__
#include <xmmintrin.h>

/* SSE2 */
typedef int __v2df __attribute__ ((mode (V2DF)));
typedef int __v2di __attribute__ ((mode (V2DI)));
typedef int __v4si __attribute__ ((mode (V4SI)));
typedef int __v8hi __attribute__ ((mode (V8HI)));
typedef int __v16qi __attribute__ ((mode (V16QI)));

/* Create a selector for use with the SHUFPD instruction.  */
#define _MM_SHUFFLE2(fp1,fp0) \
 (((fp1) << 1) | (fp0))
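
/* For example, _MM_SHUFFLE2 (0, 1) evaluates to binary 01, which asks
   SHUFPD to take element 1 of its first operand for result element 0
   and element 0 of its second operand for result element 1;
   _mm_loadr_pd below uses exactly this selector to swap the two
   elements.  */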

#define __m128i __v2di
#define __m128d __v2df

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128d
_mm_load_sd (double const *__P)
{
  return (__m128d) __builtin_ia32_loadsd (__P);
}

/* Create a vector with both elements equal to *P.  */
static __inline __m128d
_mm_load1_pd (double const *__P)
{
  __v2df __tmp = __builtin_ia32_loadsd (__P);
  return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,0));
}

static __inline __m128d
_mm_load_pd1 (double const *__P)
{
  return _mm_load1_pd (__P);
}

/* Load two DPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128d
_mm_load_pd (double const *__P)
{
  return (__m128d) __builtin_ia32_loadapd (__P);
}

/* Load two DPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128d
_mm_loadu_pd (double const *__P)
{
  return (__m128d) __builtin_ia32_loadupd (__P);
}

/* Load two DPFP values in reverse order.  The address must be aligned.  */
static __inline __m128d
_mm_loadr_pd (double const *__P)
{
  __v2df __tmp = __builtin_ia32_loadapd (__P);
  return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,1));
}

/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128d
_mm_set_sd (double __F)
{
  return (__m128d) __builtin_ia32_loadsd (&__F);
}

/* Create a vector with both elements equal to F.  */
static __inline __m128d
_mm_set1_pd (double __F)
{
  __v2df __tmp = __builtin_ia32_loadsd (&__F);
  return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,0));
}

static __inline __m128d
_mm_set_pd1 (double __F)
{
  return _mm_set1_pd (__F);
}

/* Create the vector [Z Y].  */
static __inline __m128d
_mm_set_pd (double __Z, double __Y)
{
  return (__v2df) {__Y, __Z};
}
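
/* Illustrative sketch (__v and __d are example names only): the
   arguments are given high element first, so the low element lands
   first in memory:

     __m128d __v = _mm_set_pd (3.0, 4.0);
     double __d[2];
     _mm_storeu_pd (__d, __v);

   after which __d[0] == 4.0 and __d[1] == 3.0.  */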

/* Create the vector [Y Z].  */
static __inline __m128d
_mm_setr_pd (double __Z, double __Y)
{
  return _mm_set_pd (__Y, __Z);
}

/* Create a vector of zeros.  */
static __inline __m128d
_mm_setzero_pd (void)
{
  return (__m128d) __builtin_ia32_setzeropd ();
}

/* Stores the lower DPFP value.  */
static __inline void
_mm_store_sd (double *__P, __m128d __A)
{
  __builtin_ia32_storesd (__P, (__v2df)__A);
}

/* Store the lower DPFP value across two words.  */
static __inline void
_mm_store1_pd (double *__P, __m128d __A)
{
  __v2df __va = (__v2df)__A;
  __v2df __tmp = __builtin_ia32_shufpd (__va, __va, _MM_SHUFFLE2 (0,0));
  __builtin_ia32_storeapd (__P, __tmp);
}

static __inline void
_mm_store_pd1 (double *__P, __m128d __A)
{
  _mm_store1_pd (__P, __A);
}

/* Store two DPFP values.  The address must be 16-byte aligned.  */
static __inline void
_mm_store_pd (double *__P, __m128d __A)
{
  __builtin_ia32_storeapd (__P, (__v2df)__A);
}

/* Store two DPFP values.  The address need not be 16-byte aligned.  */
static __inline void
_mm_storeu_pd (double *__P, __m128d __A)
{
  __builtin_ia32_storeupd (__P, (__v2df)__A);
}

/* Store two DPFP values in reverse order.  The address must be aligned.  */
static __inline void
_mm_storer_pd (double *__P, __m128d __A)
{
  __v2df __va = (__v2df)__A;
  __v2df __tmp = __builtin_ia32_shufpd (__va, __va, _MM_SHUFFLE2 (0,1));
  __builtin_ia32_storeapd (__P, __tmp);
}

/* Sets the low DPFP value of A from the low value of B.  */
static __inline __m128d
_mm_move_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
}
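
/* Note: given __A = {a0, a1} and __B = {b0, b1}, the result of
   _mm_move_sd is {b0, a1}; element 0 comes from __B and element 1 is
   kept from __A.  */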


static __inline __m128d
_mm_add_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_addpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_add_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_addsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_sub_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_subpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_sub_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_subsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_mul_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_mulpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_mul_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_mulsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_div_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_divpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_div_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_divsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_sqrt_pd (__m128d __A)
{
  return (__m128d)__builtin_ia32_sqrtpd ((__v2df)__A);
}

/* Return pair {sqrt (B[0]), A[1]}.  */
static __inline __m128d
_mm_sqrt_sd (__m128d __A, __m128d __B)
{
  __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
  return (__m128d)__builtin_ia32_sqrtsd ((__v2df)__tmp);
}

static __inline __m128d
_mm_min_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_minpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_min_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_minsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_max_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_maxpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_max_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_maxsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_and_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_andpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_andnot_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_andnpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_or_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_orpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_xor_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_xorpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpeq_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpeqpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmplt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpltpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmple_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmplepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpgt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpgtpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpge_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpgepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpneq_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpneqpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpnlt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnltpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpnle_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnlepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpngt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpngtpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpnge_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpngepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpord_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpordpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpunord_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpunordpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpeq_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpeqsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmplt_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpltsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmple_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmplesd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpgt_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
					 (__v2df)
					 __builtin_ia32_cmpltsd ((__v2df) __B,
								 (__v2df)
								 __A));
}

static __inline __m128d
_mm_cmpge_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
					 (__v2df)
					 __builtin_ia32_cmplesd ((__v2df) __B,
								 (__v2df)
								 __A));
}
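
/* Note: SSE2 encodes only the EQ/LT/LE/UNORD predicates (and their
   negations) for scalar compares, so _mm_cmpgt_sd and _mm_cmpge_sd
   above evaluate the swapped comparisons B < A and B <= A with
   CMPLTSD/CMPLESD, then use MOVSD to merge that scalar result back
   under the upper element of __A.  */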

static __inline __m128d
_mm_cmpneq_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpneqsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpnlt_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnltsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpnle_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnlesd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpngt_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
					 (__v2df)
					 __builtin_ia32_cmpnltsd ((__v2df) __B,
								  (__v2df)
								  __A));
}

static __inline __m128d
_mm_cmpnge_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
					 (__v2df)
					 __builtin_ia32_cmpnlesd ((__v2df) __B,
								  (__v2df)
								  __A));
}

static __inline __m128d
_mm_cmpord_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpordsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpunord_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpunordsd ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_comieq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_comilt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_comile_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_comigt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdgt ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_comige_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdge ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_comineq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdneq ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_ucomieq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdeq ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_ucomilt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdlt ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_ucomile_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdle ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_ucomigt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdgt ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_ucomige_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdge ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_ucomineq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B);
}

/* Load 128 bits of integer data.  The address must be 16-byte aligned.  */

static __inline __m128i
_mm_load_si128 (__m128i const *__P)
{
  return (__m128i) __builtin_ia32_loaddqa ((char const *)__P);
}

static __inline __m128i
_mm_loadu_si128 (__m128i const *__P)
{
  return (__m128i) __builtin_ia32_loaddqu ((char const *)__P);
}

static __inline __m128i
_mm_loadl_epi64 (__m128i const *__P)
{
  return (__m128i) __builtin_ia32_movq2dq (*(unsigned long long *)__P);
}

static __inline void
_mm_store_si128 (__m128i *__P, __m128i __B)
{
  __builtin_ia32_storedqa ((char *)__P, (__v16qi)__B);
}

static __inline void
_mm_storeu_si128 (__m128i *__P, __m128i __B)
{
  __builtin_ia32_storedqu ((char *)__P, (__v16qi)__B);
}

static __inline void
_mm_storel_epi64 (__m128i *__P, __m128i __B)
{
  *(long long *)__P = __builtin_ia32_movdq2q ((__v2di)__B);
}

static __inline __m64
_mm_movepi64_pi64 (__m128i __B)
{
  return (__m64) __builtin_ia32_movdq2q ((__v2di)__B);
}

static __inline __m128i
_mm_move_epi64 (__m128i __A)
{
  return (__m128i) __builtin_ia32_movq ((__v2di)__A);
}

/* Create a vector of zeros.  */
static __inline __m128i
_mm_setzero_si128 (void)
{
  return (__m128i) __builtin_ia32_setzero128 ();
}

static __inline __m128i
_mm_set_epi64 (__m64 __A, __m64 __B)
{
  __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A);
  __v2di __tmp2 = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__B);
  return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp2, __tmp);
}

/* Create the vector [Z Y X W].  */
static __inline __m128i
_mm_set_epi32 (int __Z, int __Y, int __X, int __W)
{
  union {
    int __a[4];
    __m128i __v;
  } __u;

  __u.__a[0] = __W;
  __u.__a[1] = __X;
  __u.__a[2] = __Y;
  __u.__a[3] = __Z;

  return __u.__v;
}
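
/* Illustrative sketch (__v is an example name only):

     __m128i __v = _mm_set_epi32 (3, 2, 1, 0);

   places 0 in 32-bit element 0, 1 in element 1, 2 in element 2 and
   3 in element 3, matching the union assignments above.  */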

#ifdef __x86_64__
/* Create the vector [Z Y].  */
static __inline __m128i
_mm_set_epi64x (long long __Z, long long __Y)
{
  union {
    long long __a[2];
    __m128i __v;
  } __u;

  __u.__a[0] = __Y;
  __u.__a[1] = __Z;

  return __u.__v;
}
#endif

/* Create the vector [Z Y X W V U T S].  */
static __inline __m128i
_mm_set_epi16 (short __Z, short __Y, short __X, short __W,
	       short __V, short __U, short __T, short __S)
{
  union {
    short __a[8];
    __m128i __v;
  } __u;

  __u.__a[0] = __S;
  __u.__a[1] = __T;
  __u.__a[2] = __U;
  __u.__a[3] = __V;
  __u.__a[4] = __W;
  __u.__a[5] = __X;
  __u.__a[6] = __Y;
  __u.__a[7] = __Z;

  return __u.__v;
}

/* Create the vector [Z Y X W V U T S Z1 Y1 X1 W1 V1 U1 T1 S1].  */
static __inline __m128i
_mm_set_epi8 (char __Z, char __Y, char __X, char __W,
	      char __V, char __U, char __T, char __S,
	      char __Z1, char __Y1, char __X1, char __W1,
	      char __V1, char __U1, char __T1, char __S1)
{
  union {
    char __a[16];
    __m128i __v;
  } __u;

  __u.__a[0] = __S1;
  __u.__a[1] = __T1;
  __u.__a[2] = __U1;
  __u.__a[3] = __V1;
  __u.__a[4] = __W1;
  __u.__a[5] = __X1;
  __u.__a[6] = __Y1;
  __u.__a[7] = __Z1;
  __u.__a[8] = __S;
  __u.__a[9] = __T;
  __u.__a[10] = __U;
  __u.__a[11] = __V;
  __u.__a[12] = __W;
  __u.__a[13] = __X;
  __u.__a[14] = __Y;
  __u.__a[15] = __Z;

  return __u.__v;
}

static __inline __m128i
_mm_set1_epi64 (__m64 __A)
{
  __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A);
  return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp, __tmp);
}

static __inline __m128i
_mm_set1_epi32 (int __A)
{
  __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__A);
  return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0));
}

#ifdef __x86_64__
static __inline __m128i
_mm_set1_epi64x (long long __A)
{
  __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A);
  return (__m128i) __builtin_ia32_shufpd ((__v2df)__tmp, (__v2df)__tmp, _MM_SHUFFLE2 (0,0));
}
#endif

static __inline __m128i
_mm_set1_epi16 (short __A)
{
  int __Acopy = (unsigned short)__A;
  __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__Acopy);
  __tmp = (__v4si)__builtin_ia32_punpcklwd128 ((__v8hi)__tmp, (__v8hi)__tmp);
  return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0));
}

static __inline __m128i
_mm_set1_epi8 (char __A)
{
  int __Acopy = (unsigned char)__A;
  __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__Acopy);
  __tmp = (__v4si)__builtin_ia32_punpcklbw128 ((__v16qi)__tmp, (__v16qi)__tmp);
  __tmp = (__v4si)__builtin_ia32_punpcklbw128 ((__v16qi)__tmp, (__v16qi)__tmp);
  return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0));
}

static __inline __m128i
_mm_setr_epi64 (__m64 __A, __m64 __B)
{
  __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A);
  __v2di __tmp2 = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__B);
  return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp, __tmp2);
}

/* Create the vector [Z Y X W].  */
static __inline __m128i
_mm_setr_epi32 (int __W, int __X, int __Y, int __Z)
{
  union {
    int __a[4];
    __m128i __v;
  } __u;

  __u.__a[0] = __W;
  __u.__a[1] = __X;
  __u.__a[2] = __Y;
  __u.__a[3] = __Z;

  return __u.__v;
}

/* Create the vector [Z Y X W V U T S].  */
static __inline __m128i
_mm_setr_epi16 (short __S, short __T, short __U, short __V,
	        short __W, short __X, short __Y, short __Z)
{
  union {
    short __a[8];
    __m128i __v;
  } __u;

  __u.__a[0] = __S;
  __u.__a[1] = __T;
  __u.__a[2] = __U;
  __u.__a[3] = __V;
  __u.__a[4] = __W;
  __u.__a[5] = __X;
  __u.__a[6] = __Y;
  __u.__a[7] = __Z;

  return __u.__v;
}

/* Create the vector [Z Y X W V U T S Z1 Y1 X1 W1 V1 U1 T1 S1].  */
static __inline __m128i
_mm_setr_epi8 (char __S1, char __T1, char __U1, char __V1,
	       char __W1, char __X1, char __Y1, char __Z1,
	       char __S, char __T, char __U, char __V,
	       char __W, char __X, char __Y, char __Z)
{
  union {
    char __a[16];
    __m128i __v;
  } __u;

  __u.__a[0] = __S1;
  __u.__a[1] = __T1;
  __u.__a[2] = __U1;
  __u.__a[3] = __V1;
  __u.__a[4] = __W1;
  __u.__a[5] = __X1;
  __u.__a[6] = __Y1;
  __u.__a[7] = __Z1;
  __u.__a[8] = __S;
  __u.__a[9] = __T;
  __u.__a[10] = __U;
  __u.__a[11] = __V;
  __u.__a[12] = __W;
  __u.__a[13] = __X;
  __u.__a[14] = __Y;
  __u.__a[15] = __Z;

  return __u.__v;
}

static __inline __m128d
_mm_cvtepi32_pd (__m128i __A)
{
  return (__m128d)__builtin_ia32_cvtdq2pd ((__v4si) __A);
}

static __inline __m128
_mm_cvtepi32_ps (__m128i __A)
{
  return (__m128)__builtin_ia32_cvtdq2ps ((__v4si) __A);
}

static __inline __m128i
_mm_cvtpd_epi32 (__m128d __A)
{
  return (__m128i)__builtin_ia32_cvtpd2dq ((__v2df) __A);
}

static __inline __m64
_mm_cvtpd_pi32 (__m128d __A)
{
  return (__m64)__builtin_ia32_cvtpd2pi ((__v2df) __A);
}

static __inline __m128
_mm_cvtpd_ps (__m128d __A)
{
  return (__m128)__builtin_ia32_cvtpd2ps ((__v2df) __A);
}

static __inline __m128i
_mm_cvttpd_epi32 (__m128d __A)
{
  return (__m128i)__builtin_ia32_cvttpd2dq ((__v2df) __A);
}

static __inline __m64
_mm_cvttpd_pi32 (__m128d __A)
{
  return (__m64)__builtin_ia32_cvttpd2pi ((__v2df) __A);
}

static __inline __m128d
_mm_cvtpi32_pd (__m64 __A)
{
  return (__m128d)__builtin_ia32_cvtpi2pd ((__v2si) __A);
}

static __inline __m128i
_mm_cvtps_epi32 (__m128 __A)
{
  return (__m128i)__builtin_ia32_cvtps2dq ((__v4sf) __A);
}

static __inline __m128i
_mm_cvttps_epi32 (__m128 __A)
{
  return (__m128i)__builtin_ia32_cvttps2dq ((__v4sf) __A);
}

static __inline __m128d
_mm_cvtps_pd (__m128 __A)
{
  return (__m128d)__builtin_ia32_cvtps2pd ((__v4sf) __A);
}

static __inline int
_mm_cvtsd_si32 (__m128d __A)
{
  return __builtin_ia32_cvtsd2si ((__v2df) __A);
}

#ifdef __x86_64__
static __inline long long
_mm_cvtsd_si64x (__m128d __A)
{
  return __builtin_ia32_cvtsd2si64 ((__v2df) __A);
}
#endif

static __inline int
_mm_cvttsd_si32 (__m128d __A)
{
  return __builtin_ia32_cvttsd2si ((__v2df) __A);
}

#ifdef __x86_64__
static __inline long long
_mm_cvttsd_si64x (__m128d __A)
{
  return __builtin_ia32_cvttsd2si64 ((__v2df) __A);
}
#endif

static __inline __m128
_mm_cvtsd_ss (__m128 __A, __m128d __B)
{
  return (__m128)__builtin_ia32_cvtsd2ss ((__v4sf) __A, (__v2df) __B);
}

static __inline __m128d
_mm_cvtsi32_sd (__m128d __A, int __B)
{
  return (__m128d)__builtin_ia32_cvtsi2sd ((__v2df) __A, __B);
}

#ifdef __x86_64__
static __inline __m128d
_mm_cvtsi64x_sd (__m128d __A, long long __B)
{
  return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
}
#endif

static __inline __m128d
_mm_cvtss_sd (__m128d __A, __m128 __B)
{
  return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B);
}

#define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, (__C)))

static __inline __m128d
_mm_unpackhi_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_unpckhpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_unpacklo_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_unpcklpd ((__v2df)__A, (__v2df)__B);
}
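
/* Note: with __A = {a0, a1} and __B = {b0, b1}, _mm_unpackhi_pd
   returns {a1, b1} and _mm_unpacklo_pd returns {a0, b0}.  */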

static __inline __m128d
_mm_loadh_pd (__m128d __A, double const *__B)
{
  return (__m128d)__builtin_ia32_loadhpd ((__v2df)__A, (__v2si *)__B);
}

static __inline void
_mm_storeh_pd (double *__A, __m128d __B)
{
  __builtin_ia32_storehpd ((__v2si *)__A, (__v2df)__B);
}

static __inline __m128d
_mm_loadl_pd (__m128d __A, double const *__B)
{
  return (__m128d)__builtin_ia32_loadlpd ((__v2df)__A, (__v2si *)__B);
}

static __inline void
_mm_storel_pd (double *__A, __m128d __B)
{
  __builtin_ia32_storelpd ((__v2si *)__A, (__v2df)__B);
}

static __inline int
_mm_movemask_pd (__m128d __A)
{
  return __builtin_ia32_movmskpd ((__v2df)__A);
}
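
/* Illustrative use: MOVMSKPD gathers the sign bits, so bit 0 of the
   result is the sign of element 0 and bit 1 the sign of element 1.
   For example, _mm_movemask_pd (_mm_cmplt_pd (__A, __B)) == 3 tests
   whether both elements of __A compare less than __B.  */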

static __inline __m128i
_mm_packs_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packsswb128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_packs_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packssdw128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i
_mm_packus_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packuswb128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_unpackhi_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhbw128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_unpackhi_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_unpackhi_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhdq128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i
_mm_unpackhi_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhqdq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_unpacklo_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklbw128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_unpacklo_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_unpacklo_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckldq128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i
_mm_unpacklo_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklqdq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_add_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_add_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_add_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i
_mm_add_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_adds_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddsb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_adds_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_adds_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddusb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_adds_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddusw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_sub_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_sub_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_sub_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i
_mm_sub_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_subs_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubsb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_subs_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_subs_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubusb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_subs_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubusw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_madd_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaddwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_mulhi_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmulhw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_mullo_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmullw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m64
_mm_mul_su32 (__m64 __A, __m64 __B)
{
  return (__m64)__builtin_ia32_pmuludq ((__v2si)__A, (__v2si)__B);
}

static __inline __m128i
_mm_mul_epu32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmuludq128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i
_mm_sll_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psllw128 ((__v8hi)__A, (__v2di)__B);
}

static __inline __m128i
_mm_sll_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pslld128 ((__v4si)__A, (__v2di)__B);
}

static __inline __m128i
_mm_sll_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psllq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_sra_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psraw128 ((__v8hi)__A, (__v2di)__B);
}

static __inline __m128i
_mm_sra_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psrad128 ((__v4si)__A, (__v2di)__B);
}

static __inline __m128i
_mm_srl_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psrlw128 ((__v8hi)__A, (__v2di)__B);
}

static __inline __m128i
_mm_srl_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psrld128 ((__v4si)__A, (__v2di)__B);
}

static __inline __m128i
_mm_srl_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psrlq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_slli_epi16 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B);
}

static __inline __m128i
_mm_slli_epi32 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B);
}

static __inline __m128i
_mm_slli_epi64 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B);
}

static __inline __m128i
_mm_srai_epi16 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B);
}

static __inline __m128i
_mm_srai_epi32 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B);
}
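
/* Note: the _mm_sll/_mm_sra/_mm_srl forms take the shift count from
   the low 64 bits of a vector operand, while the _mm_slli/_mm_srai/
   _mm_srli forms take it as an immediate.  Counts larger than the
   element width produce zero for the logical shifts and a lane of
   sign bits for the arithmetic shifts.  */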

#if 0
static __m128i __attribute__((__always_inline__))
_mm_srli_si128 (__m128i __A, const int __B)
{
  return ((__m128i)__builtin_ia32_psrldqi128 (__A, __B));
}

static __m128i __attribute__((__always_inline__))
_mm_slli_si128 (__m128i __A, const int __B)
{
  return ((__m128i)__builtin_ia32_pslldqi128 (__A, __B));
}
#endif
#define _mm_srli_si128(__A, __B) ((__m128i)__builtin_ia32_psrldqi128 (__A, __B))
#define _mm_slli_si128(__A, __B) ((__m128i)__builtin_ia32_pslldqi128 (__A, __B))
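
/* Note: unlike the element shifts, these two macros shift the whole
   128-bit value, and the count is measured in bytes rather than bits;
   e.g. _mm_srli_si128 (__A, 8) moves the high quadword of __A into
   the low quadword.  */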

static __inline __m128i
_mm_srli_epi16 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B);
}

static __inline __m128i
_mm_srli_epi32 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B);
}

static __inline __m128i
_mm_srli_epi64 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B);
}

static __inline __m128i
_mm_and_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pand128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_andnot_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pandn128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_or_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_por128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_xor_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pxor128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_cmpeq_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_cmpeq_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_cmpeq_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i
_mm_cmplt_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__B, (__v16qi)__A);
}

static __inline __m128i
_mm_cmplt_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__B, (__v8hi)__A);
}

static __inline __m128i
_mm_cmplt_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__B, (__v4si)__A);
}
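
/* Note: SSE2 provides only PCMPGT for signed comparison, so the three
   _mm_cmplt forms above are PCMPGT with the operands swapped.  */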

static __inline __m128i
_mm_cmpgt_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_cmpgt_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_cmpgt_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B);
}

#define _mm_extract_epi16(__A, __B) __builtin_ia32_pextrw128 ((__v8hi)__A, __B)

#define _mm_insert_epi16(__A, __B, __C) ((__m128i)__builtin_ia32_pinsrw128 ((__v8hi)__A, __B, __C))

static __inline __m128i
_mm_max_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaxsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_max_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaxub128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_min_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pminsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_min_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pminub128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline int
_mm_movemask_epi8 (__m128i __A)
{
  return __builtin_ia32_pmovmskb128 ((__v16qi)__A);
}

static __inline __m128i
_mm_mulhi_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B);
}

#define _mm_shufflehi_epi16(__A, __B) ((__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __B))
#define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __B))
#define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)__A, __B))

static __inline void
_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
{
  __builtin_ia32_maskmovdqu ((__v16qi)__A, (__v16qi)__B, __C);
}

static __inline __m128i
_mm_avg_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pavgb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_avg_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pavgw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_sad_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psadbw128 ((__v16qi)__A, (__v16qi)__B);
}
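
/* Note: PSADBW sums absolute byte differences within each 64-bit
   half, leaving one 16-bit total in the low word of each half and
   zeroing the remaining words.  */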

static __inline void
_mm_stream_si32 (int *__A, int __B)
{
  __builtin_ia32_movnti (__A, __B);
}

static __inline void
_mm_stream_si128 (__m128i *__A, __m128i __B)
{
  __builtin_ia32_movntdq ((__v2di *)__A, (__v2di)__B);
}

static __inline void
_mm_stream_pd (double *__A, __m128d __B)
{
  __builtin_ia32_movntpd (__A, (__v2df)__B);
}

static __inline __m128i
_mm_movpi64_epi64 (__m64 __A)
{
  return (__m128i)__builtin_ia32_movq2dq ((unsigned long long)__A);
}

static __inline void
_mm_clflush (void const *__A)
{
  __builtin_ia32_clflush (__A);
}

static __inline void
_mm_lfence (void)
{
  __builtin_ia32_lfence ();
}

static __inline void
_mm_mfence (void)
{
  __builtin_ia32_mfence ();
}

static __inline __m128i
_mm_cvtsi32_si128 (int __A)
{
  return (__m128i) __builtin_ia32_loadd (&__A);
}

#ifdef __x86_64__
static __inline __m128i
_mm_cvtsi64x_si128 (long long __A)
{
  return (__m128i) __builtin_ia32_movq2dq (__A);
}
#endif

static __inline int
_mm_cvtsi128_si32 (__m128i __A)
{
  int __tmp;
  __builtin_ia32_stored (&__tmp, (__v4si)__A);
  return __tmp;
}

#ifdef __x86_64__
static __inline long long
_mm_cvtsi128_si64x (__m128i __A)
{
  return __builtin_ia32_movdq2q ((__v2di)__A);
}
#endif

#endif /* __SSE2__  */

#endif /* _EMMINTRIN_H_INCLUDED */