/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.

   This file is part of GNU CC.

   GNU CC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GNU CC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GNU CC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 8.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

#ifndef __SSE__
# error "SSE instruction set not enabled"
#else

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* The data type intended for user use.  */
typedef int __m128 __attribute__ ((__mode__(__V4SF__)));

/* Internal data types for implementing the intrinsics.  */
typedef int __v4sf __attribute__ ((__mode__(__V4SF__)));
typedef int __v4si __attribute__ ((__mode__(__V4SI__)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000
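
/* For illustration (not part of the original header): _MM_SHUFFLE packs
   four 2-bit element indices into one byte, so _MM_SHUFFLE (3, 2, 1, 0)
   evaluates to 0xE4 (the identity permutation), _MM_SHUFFLE (0, 1, 2, 3)
   to 0x1B (reversed order), and _MM_SHUFFLE (0, 0, 0, 0) to 0x00
   (broadcast element 0).  The value is consumed by _mm_shuffle_ps and
   _mm_shuffle_pi16 below.  */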

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

static __inline __m128
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

static __inline __m128
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

static __inline __m128
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

static __inline __m128
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform the respective operation on the four SPFP values in A and B.  */

static __inline __m128
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

static __inline __m128
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

static __inline __m128
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

static __inline __m128
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

static __inline __m128
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

static __inline __m128
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

static __inline __m128
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

static __inline __m128
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}
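
/* Illustrative example, not part of the original header: _mm_rcp_ps gives
   only an ~12-bit approximation of 1/x, so callers typically refine it
   with one Newton-Raphson step, x1 = x0 * (2 - d * x0).  The helper name
   below is hypothetical.  */
static __inline __m128
__example_recip_nr (__m128 __d)
{
  __m128 __x0 = _mm_rcp_ps (__d);
  /* 2*x0 - d*x0*x0 == x0 * (2 - d*x0).  */
  return _mm_sub_ps (_mm_add_ps (__x0, __x0),
		     _mm_mul_ps (__d, _mm_mul_ps (__x0, __x0)));
}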

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

static __inline __m128
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpltss ((__v4sf) __B,
								(__v4sf)
								__A));
}

static __inline __m128
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpless ((__v4sf) __B,
								(__v4sf)
								__A));
}

static __inline __m128
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnltss ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

static __inline __m128
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnless ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

static __inline __m128
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

static __inline __m128
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}
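
/* Illustrative example, not part of the original header: because the
   packed comparisons return per-element all-ones/all-zeros masks, they
   combine with the logical operations above into a branchless select.
   The helper below (hypothetical name) picks the smaller element from
   each lane, mirroring what _mm_min_ps computes for ordinary values.  */
static __inline __m128
__example_select_min (__m128 __a, __m128 __b)
{
  __m128 __mask = _mm_cmplt_ps (__a, __b);	/* all ones where a < b */
  return _mm_or_ps (_mm_and_ps (__mask, __a),
		    _mm_andnot_ps (__mask, __b));
}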

/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

static __inline int
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
static __inline int
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

static __inline int
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 64-bit integer according to the current
   rounding mode.  */
static __inline long long
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
static __inline __m64
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

static __inline __m64
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
static __inline int
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

static __inline int
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 64-bit integer.  */
static __inline long long
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
static __inline __m64
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

static __inline __m64
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}

/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

static __inline __m128
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
static __inline __m128
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

static __inline __m128
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __r;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = (__v4hi) __builtin_ia32_mmx_zero ();
  __sign = __builtin_ia32_pcmpgtw (__sign, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) __builtin_ia32_setzerops ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpu16_ps (__m64 __A)
{
  __v4hi __zero = (__v4hi) __builtin_ia32_mmx_zero ();
  __v2si __hisi, __losi;
  __v4sf __r;

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __zero);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __zero);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) __builtin_ia32_setzerops ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = (__v8qi) __builtin_ia32_mmx_zero ();
  __sign = __builtin_ia32_pcmpgtb (__sign, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}
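
/* Illustrative example, not part of the original header: the two scalar
   conversions above differ only in rounding.  _mm_cvtss_si32 honours the
   MXCSR rounding mode (round-to-nearest by default), while
   _mm_cvttss_si32 always truncates toward zero, matching a C cast.  The
   helper name is hypothetical.  */
static __inline void
__example_convert_lower (__m128 __a, int *__rounded, int *__truncated)
{
  *__rounded = _mm_cvtss_si32 (__a);
  *__truncated = _mm_cvttss_si32 (__a);
}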

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpu8_ps(__m64 __A)
{
  __v8qi __zero = (__v8qi) __builtin_ia32_mmx_zero ();
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __zero);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
static __inline __m128
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) __builtin_ia32_setzerops ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
static __inline __m64
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
static __inline __m64
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  __v4hi __zero = (__v4hi) __builtin_ia32_mmx_zero ();
  return (__m64) __builtin_ia32_packsswb (__tmp, __zero);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#if 0
static __inline __m128
_mm_shuffle_ps (__m128 __A, __m128 __B, int __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
 ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK)))
#endif


/* Selects and interleaves the upper two SPFP values from A and B.  */
static __inline __m128
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
static __inline __m128
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
static __inline __m128
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
static __inline void
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
static __inline __m128
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
static __inline __m128
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}
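
/* Illustrative example, not part of the original header: _mm_shuffle_ps
   with both operands equal to A permutes A's elements according to the
   _MM_SHUFFLE selector; _MM_SHUFFLE (0, 1, 2, 3) reverses the element
   order, the same selector the loadr/storer functions below use.  The
   helper name is hypothetical.  */
static __inline __m128
__example_reverse_ps (__m128 __a)
{
  return _mm_shuffle_ps (__a, __a, _MM_SHUFFLE (0, 1, 2, 3));
}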

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
static __inline __m128
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
static __inline void
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
static __inline int
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}

/* Return the contents of the control register.  */
static __inline unsigned int
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
static __inline unsigned int
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

static __inline unsigned int
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

static __inline unsigned int
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

static __inline unsigned int
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
static __inline void
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
static __inline void
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

static __inline void
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

static __inline void
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

static __inline void
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128
_mm_load_ss (float const *__P)
{
  return (__m128) __builtin_ia32_loadss (__P);
}

/* Create a vector with all four elements equal to *P.  */
static __inline __m128
_mm_load1_ps (float const *__P)
{
  __v4sf __tmp = __builtin_ia32_loadss (__P);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,0,0,0));
}

static __inline __m128
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128
_mm_load_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadaps (__P);
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
static __inline __m128
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = __builtin_ia32_loadaps (__P);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}
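
/* Illustrative example, not part of the original header: the rounding
   mode used by _mm_cvtss_si32 and friends can be changed through the
   MXCSR accessors above.  This (hypothetical) helper switches to
   round-toward-zero and returns the previous mode so the caller can
   restore it later with _MM_SET_ROUNDING_MODE.  */
static __inline unsigned int
__example_set_round_toward_zero (void)
{
  unsigned int __old = _MM_GET_ROUNDING_MODE ();
  _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
  return __old;
}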

/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128
_mm_set_ss (float __F)
{
  return (__m128) __builtin_ia32_loadss (&__F);
}

/* Create a vector with all four elements equal to F.  */
static __inline __m128
_mm_set1_ps (float __F)
{
  __v4sf __tmp = __builtin_ia32_loadss (&__F);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,0,0,0));
}

static __inline __m128
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create the vector [Z Y X W].  */
static __inline __m128
_mm_set_ps (float __Z, float __Y, float __X, float __W)
{
  union {
    float __a[4];
    __m128 __v;
  } __u;

  __u.__a[0] = __W;
  __u.__a[1] = __X;
  __u.__a[2] = __Y;
  __u.__a[3] = __Z;

  return __u.__v;
}

/* Create the vector [W X Y Z].  */
static __inline __m128
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return _mm_set_ps (__W, __X, __Y, __Z);
}

/* Create a vector of zeros.  */
static __inline __m128
_mm_setzero_ps (void)
{
  return (__m128) __builtin_ia32_setzerops ();
}

/* Stores the lower SPFP value.  */
static __inline void
_mm_store_ss (float *__P, __m128 __A)
{
  __builtin_ia32_storess (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
static __inline void
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  __builtin_ia32_storeaps (__P, __tmp);
}

static __inline void
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
static __inline void
_mm_store_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeaps (__P, (__v4sf)__A);
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
static __inline void
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
static __inline void
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  __builtin_ia32_storeaps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
static __inline __m128
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#if 0
static __inline int
_mm_extract_pi16 (__m64 __A, int __N)
{
  return __builtin_ia32_pextrw ((__v4hi)__A, __N);
}

static __inline int
_m_pextrw (__m64 __A, int __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
  __builtin_ia32_pextrw ((__v4hi)(A), (N))
#define _m_pextrw(A, N) _mm_extract_pi16((A), (N))
#endif
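
/* Illustrative example, not part of the original header: _mm_set_ps lists
   its arguments from element 3 down to element 0, while _mm_setr_ps lists
   them in memory order, so the two calls below build the same vector and
   the (hypothetical) helper returns 1.  */
static __inline int
__example_set_vs_setr (void)
{
  __m128 __a = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);	/* element 0 is 1.0f */
  __m128 __b = _mm_setr_ps (1.0f, 2.0f, 3.0f, 4.0f);	/* element 0 is 1.0f */
  return _mm_movemask_ps (_mm_cmpeq_ps (__a, __b)) == 0xf;
}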

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#if 0
static __inline __m64
_mm_insert_pi16 (__m64 __A, int __D, int __N)
{
  return (__m64)__builtin_ia32_pinsrw ((__v4hi)__A, __D, __N);
}

static __inline __m64
_m_pinsrw (__m64 __A, int __D, int __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_pinsrw ((__v4hi)(A), (D), (N)))
#define _m_pinsrw(A, D, N) _mm_insert_pi16((A), (D), (N))
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
static __inline __m64
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
static __inline __m64
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
static __inline __m64
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
static __inline __m64
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
static __inline int
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

static __inline int
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
static __inline __m64
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#if 0
static __inline __m64
_mm_shuffle_pi16 (__m64 __A, int __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

static __inline __m64
_m_pshufw (__m64 __A, int __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
#define _m_pshufw(A, N) _mm_shuffle_pi16 ((A), (N))
#endif
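
/* Illustrative example, not part of the original header: the unsigned
   byte minimum/maximum combine into a clamp of eight values at once.
   The helper name is hypothetical; as with any MMX code, call
   _mm_empty () before returning to x87 floating-point code.  */
static __inline __m64
__example_clamp_pu8 (__m64 __v, __m64 __lo, __m64 __hi)
{
  return _mm_min_pu8 (_mm_max_pu8 (__v, __lo), __hi);
}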

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
static __inline void
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

static __inline void
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
static __inline __m64
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
static __inline __m64
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
static __inline __m64
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#if 0
static __inline void
_mm_prefetch (void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), 0, (I))
#endif

/* Stores the data in A to the address P without polluting the caches.  */
static __inline void
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
static __inline void
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
static __inline void
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}

/* The execution of the next instruction is delayed by an implementation-
   specific amount of time.  The instruction does not modify the
   architectural state.  */
static __inline void
_mm_pause (void)
{
  __asm__ __volatile__ ("rep; nop" : : );
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)			\
do {									\
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);	\
  __v4sf __t0 = __builtin_ia32_shufps (__r0, __r1, 0x44);		\
  __v4sf __t2 = __builtin_ia32_shufps (__r0, __r1, 0xEE);		\
  __v4sf __t1 = __builtin_ia32_shufps (__r2, __r3, 0x44);		\
  __v4sf __t3 = __builtin_ia32_shufps (__r2, __r3, 0xEE);		\
  (row0) = __builtin_ia32_shufps (__t0, __t1, 0x88);			\
  (row1) = __builtin_ia32_shufps (__t0, __t1, 0xDD);			\
  (row2) = __builtin_ia32_shufps (__t2, __t3, 0x88);			\
  (row3) = __builtin_ia32_shufps (__t2, __t3, 0xDD);			\
} while (0)
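
/* Illustrative example, not part of the original header: transposing a
   row-major 4x4 matrix in place with _MM_TRANSPOSE4_PS.  The helper name
   is hypothetical; the argument must point to sixteen floats at a
   16-byte aligned address, as _mm_load_ps/_mm_store_ps require.  */
static __inline void
__example_transpose4 (float *__mat)
{
  __m128 __row0 = _mm_load_ps (__mat + 0);
  __m128 __row1 = _mm_load_ps (__mat + 4);
  __m128 __row2 = _mm_load_ps (__mat + 8);
  __m128 __row3 = _mm_load_ps (__mat + 12);
  _MM_TRANSPOSE4_PS (__row0, __row1, __row2, __row3);
  _mm_store_ps (__mat + 0, __row0);
  _mm_store_ps (__mat + 4, __row1);
  _mm_store_ps (__mat + 8, __row2);
  _mm_store_ps (__mat + 12, __row3);
}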

/* For backward source compatibility.  */
#include <emmintrin.h>

#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */