/openbsd/gnu/usr.bin/gcc/gcc/config/i386/
emmintrin.h
    154  __v2df __va = (__v2df)__A;  in _mm_store1_pd()
    183  __v2df __va = (__v2df)__A;  in _mm_storer_pd()
    254  __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);  in _mm_sqrt_sd()
    400  (__v2df)  in _mm_cmpgt_sd()
    410  (__v2df)  in _mm_cmpge_sd()
    438  (__v2df)  in _mm_cmpngt_sd()
    448  (__v2df)  in _mm_cmpnge_sd()
    469  return __builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B);  in _mm_comieq_sd()
    475  return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B);  in _mm_comilt_sd()
    481  return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B);  in _mm_comile_sd()
    [all …]
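Note: in all of these headers, __v2df is the compiler's internal "vector of two doubles" type, defined with the GCC/Clang vector_size extension (the ChangeLog entry at the end of this listing records its introduction in xmmintrin.h). Casts such as (__v2df)__A are free reinterpretations that let the __m128d wrappers feed the builtins. A minimal standalone sketch of the same mechanism, using an unreserved name in place of the header's __v2df:

    #include <stdio.h>

    /* Same shape as the __v2df typedef in emmintrin.h. */
    typedef double v2df __attribute__ ((__vector_size__ (16)));

    int main (void)
    {
      v2df a = { 1.0, 2.0 };
      v2df b = { 10.0, 20.0 };
      v2df sum = a + b;                    /* element-wise: {11.0, 22.0} */
      printf ("%g %g\n", sum[0], sum[1]);  /* lanes are indexable */
      return 0;
    }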
pmmintrin.h
    80  return (__m128d) __builtin_ia32_addsubpd ((__v2df)__X, (__v2df)__Y);  in _mm_addsub_pd()
    86  return (__m128d) __builtin_ia32_haddpd ((__v2df)__X, (__v2df)__Y);  in _mm_hadd_pd()
    92  return (__m128d) __builtin_ia32_hsubpd ((__v2df)__X, (__v2df)__Y);  in _mm_hsub_pd()
    104  return (__m128d) __builtin_ia32_movddup ((__v2df)__X);  in _mm_movedup_pd()
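These pmmintrin.h wrappers map one-to-one onto the SSE3 asymmetric and horizontal operations. A quick sketch of what the wrapped builtins compute, using the public intrinsics (x86, compile with -msse3):

    #include <pmmintrin.h>
    #include <stdio.h>

    int main (void)
    {
      __m128d x = _mm_set_pd (2.0, 1.0);    /* x = {1.0, 2.0} */
      __m128d y = _mm_set_pd (40.0, 30.0);  /* y = {30.0, 40.0} */

      __m128d as = _mm_addsub_pd (x, y);    /* {x0-y0, x1+y1} = {-29, 42} */
      __m128d ha = _mm_hadd_pd (x, y);      /* {x0+x1, y0+y1} = {3, 70}   */
      __m128d hs = _mm_hsub_pd (x, y);      /* {x0-x1, y0-y1} = {-1, -10} */

      printf ("%g %g / %g %g / %g %g\n",
              as[0], as[1], ha[0], ha[1], hs[0], hs[1]);
      return 0;
    }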
/openbsd/gnu/gcc/gcc/config/i386/
emmintrin.h
    283  __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);  in _mm_sqrt_sd()
    429  (__v2df)  in _mm_cmpgt_sd()
    439  (__v2df)  in _mm_cmpge_sd()
    467  (__v2df)  in _mm_cmpngt_sd()
    477  (__v2df)  in _mm_cmpnge_sd()
    498  return __builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B);  in _mm_comieq_sd()
    504  return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B);  in _mm_comilt_sd()
    510  return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B);  in _mm_comile_sd()
    516  return __builtin_ia32_comisdgt ((__v2df)__A, (__v2df)__B);  in _mm_comigt_sd()
    522  return __builtin_ia32_comisdge ((__v2df)__A, (__v2df)__B);  in _mm_comige_sd()
    [all …]
pmmintrin.h
    80  return (__m128d) __builtin_ia32_addsubpd ((__v2df)__X, (__v2df)__Y);  in _mm_addsub_pd()
    86  return (__m128d) __builtin_ia32_haddpd ((__v2df)__X, (__v2df)__Y);  in _mm_hadd_pd()
    92  return (__m128d) __builtin_ia32_hsubpd ((__v2df)__X, (__v2df)__Y);  in _mm_hsub_pd()
/openbsd/gnu/llvm/clang/lib/Headers/
fmaintrin.h
    30  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);  in _mm_fmadd_pd()
    42  return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C);  in _mm_fmadd_sd()
    54  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);  in _mm_fmsub_pd()
    66  return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, -(__v2df)__C);  in _mm_fmsub_sd()
    78  return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);  in _mm_fnmadd_pd()
    90  return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, (__v2df)__C);  in _mm_fnmadd_sd()
    102  return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);  in _mm_fnmsub_pd()
    114  return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, -(__v2df)__C);  in _mm_fnmsub_sd()
    126  return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);  in _mm_fmaddsub_pd()
    138  return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);  in _mm_fmsubadd_pd()
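Worth noting in the matches above: there is no separate subtract builtin; _mm_fmsub_pd and friends reuse __builtin_ia32_vfmaddpd with a negated operand, because unary minus on a vector type negates every lane. A plain C illustration of the trick (the fmadd/fmsub helpers are illustrative stand-ins and ignore real FMA's single rounding):

    #include <stdio.h>

    typedef double v2df __attribute__ ((__vector_size__ (16)));

    static v2df fmadd (v2df a, v2df b, v2df c) { return a * b + c; }
    /* fmsub(a,b,c) expressed as fmadd(a,b,-c), as in the header. */
    static v2df fmsub (v2df a, v2df b, v2df c) { return fmadd (a, b, -c); }

    int main (void)
    {
      v2df a = { 2.0, 3.0 }, b = { 4.0, 5.0 }, c = { 1.0, 1.0 };
      v2df r = fmsub (a, b, c);           /* {2*4-1, 3*5-1} = {7, 14} */
      printf ("%g %g\n", r[0], r[1]);
      return 0;
    }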
fma4intrin.h
    32  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);  in _mm_macc_pd()
    44  return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C);  in _mm_macc_sd()
    56  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);  in _mm_msub_pd()
    68  return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, -(__v2df)__C);  in _mm_msub_sd()
    80  return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);  in _mm_nmacc_pd()
    92  return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);  in _mm_nmacc_sd()
    104  return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);  in _mm_nmsub_pd()
    116  return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);  in _mm_nmsub_sd()
    128  return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);  in _mm_maddsub_pd()
    140  return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);  in _mm_msubadd_pd()
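In both the FMA3 (fmaintrin.h) and FMA4 (fma4intrin.h) wrappers, the _sd forms compute only lane 0 and carry the upper lane through from the first operand. A plain C model of that convention (fmadd_sd is a hypothetical helper, again ignoring FMA's single rounding):

    #include <stdio.h>

    typedef double v2df __attribute__ ((__vector_size__ (16)));

    static v2df fmadd_sd (v2df a, v2df b, v2df c)
    {
      v2df r = a;                  /* upper lane passes through from a */
      r[0] = a[0] * b[0] + c[0];   /* only the low lane is computed */
      return r;
    }

    int main (void)
    {
      v2df a = { 1.0, 9.0 }, b = { 2.0, 8.0 }, c = { 3.0, 7.0 };
      v2df r = fmadd_sd (a, b, c); /* {1*2+3, 9} = {5, 9} */
      printf ("%g %g\n", r[0], r[1]);
      return 0;
    }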
emmintrin.h
    94  return (__m128d)((__v2df)__a + (__v2df)__b);  in _mm_add_pd()
    134  return (__m128d)((__v2df)__a - (__v2df)__b);  in _mm_sub_pd()
    173  return (__m128d)((__v2df)__a * (__v2df)__b);  in _mm_mul_pd()
    214  return (__m128d)((__v2df)__a / (__v2df)__b);  in _mm_div_pd()
    277  return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);  in _mm_min_sd()
    296  return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);  in _mm_min_pd()
    319  return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);  in _mm_max_sd()
    338  return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);  in _mm_max_pd()
    969  return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);  in _mm_comieq_sd()
    994  return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);  in _mm_comilt_sd()
    [all …]
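Notice the split above: add/sub/mul/div are plain C vector expressions, while min/max still go through builtins. MINPD/MAXPD return the second operand when the inputs are unordered, a guarantee no portable C comparison gives. A small demonstration (SSE2, on by default on amd64):

    #include <emmintrin.h>
    #include <math.h>
    #include <stdio.h>

    int main (void)
    {
      __m128d a = _mm_set_sd (NAN);
      __m128d b = _mm_set_sd (1.0);
      __m128d r1 = _mm_min_sd (a, b);  /* lane 0 = 1.0 (second operand) */
      __m128d r2 = _mm_min_sd (b, a);  /* lane 0 = NaN (second operand) */
      printf ("%g %g\n", r1[0], r2[0]);
      return 0;
    }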
pmmintrin.h
    167  return __builtin_ia32_addsubpd((__v2df)__a, (__v2df)__b);  in _mm_addsub_pd()
    190  return __builtin_ia32_haddpd((__v2df)__a, (__v2df)__b);  in _mm_hadd_pd()
    213  return __builtin_ia32_hsubpd((__v2df)__a, (__v2df)__b);  in _mm_hsub_pd()
    249  return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);  in _mm_movedup_pd()
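The last match shows Clang lowering _mm_movedup_pd to __builtin_shufflevector with indices 0,0, a broadcast of the low lane rather than a dedicated builtin. Observable behavior (SSE3):

    #include <pmmintrin.h>
    #include <stdio.h>

    int main (void)
    {
      __m128d a = _mm_set_pd (2.0, 1.0);  /* {1.0, 2.0} */
      __m128d d = _mm_movedup_pd (a);     /* lane 0 duplicated: {1.0, 1.0} */
      printf ("%g %g\n", d[0], d[1]);
      return 0;
    }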
avx512erintrin.h
    142  ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
    143  (__v2df)(__m128d)(B), \
    148  ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
    149  (__v2df)(__m128d)(B), \
    245  ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
    246  (__v2df)(__m128d)(B), \
    251  ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
    252  (__v2df)(__m128d)(B), \
    253  (__v2df)(__m128d)(S), \
    257  ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
    [all …]
avx512vldqintrin.h
    77  (__v2df)__W);  in _mm_mask_andnot_pd()
    133  (__v2df)__W);  in _mm_mask_and_pd()
    189  (__v2df)__W);  in _mm_mask_xor_pd()
    245  (__v2df)__W);  in _mm_mask_or_pd()
    285  return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,  in _mm_cvtpd_epi64()
    292  return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,  in _mm_mask_cvtpd_epi64()
    453  return (__m128d)__builtin_convertvector((__v2di)__A, __v2df);  in _mm_cvtepi64_pd()
    460  (__v2df)__W);  in _mm_mask_cvtepi64_pd()
    699  return (__m128d)__builtin_convertvector((__v2du)__A, __v2df);  in _mm_cvtepu64_pd()
    706  (__v2df)__W);  in _mm_mask_cvtepu64_pd()
    [all …]
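The repeated (__v2df)__W arguments above are the AVX-512 merge-masking convention: the builtin computes the full result, then keeps lane i where mask bit i is set and takes it from __W otherwise (the maskz_ variants substitute zero). A plain C model of the selection step (mask_select is an illustrative helper, not the header's code):

    #include <stdio.h>

    typedef double v2df __attribute__ ((__vector_size__ (16)));

    static v2df mask_select (unsigned char k, v2df result, v2df w)
    {
      v2df r;
      for (int i = 0; i < 2; i++)
        r[i] = ((k >> i) & 1) ? result[i] : w[i];  /* merge per lane */
      return r;
    }

    int main (void)
    {
      v2df res = { 10.0, 20.0 };
      v2df w   = { -1.0, -2.0 };
      v2df r   = mask_select (0x1, res, w);  /* {10.0, -2.0} */
      printf ("%g %g\n", r[0], r[1]);
      return 0;
    }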
avx512fintrin.h
    391  return (__m512d)__builtin_shufflevector((__v2df) __A, (__v2df) __A,  in _mm512_broadcastsd_pd()
    1647  (__v2df)  in _mm_rsqrt14_sd()
    1656  (__v2df) __B,  in _mm_mask_rsqrt14_sd()
    1657  (__v2df) __W,  in _mm_mask_rsqrt14_sd()
    1665  (__v2df) __B,  in _mm_maskz_rsqrt14_sd()
    1755  (__v2df)  in _mm_rcp14_sd()
    1764  (__v2df) __B,  in _mm_mask_rcp14_sd()
    1765  (__v2df) __W,  in _mm_mask_rcp14_sd()
    1773  (__v2df) __B,  in _mm_maskz_rcp14_sd()
    6521  (__v2df)( __B), (__v2df) _mm_setzero_pd(),  in _mm_scalef_sd()
    [all …]
avx512vlintrin.h
    1849  (__v2df) __A,  in _mm_mask_compressstoreu_pd()
    2517  (__v2df)  in _mm_maskz_expandloadu_pd()
    2704  (__v2df)  in _mm_getexp_pd()
    2719  (__v2df)  in _mm_maskz_getexp_pd()
    3369  (__v2df)  in _mm_scalef_pd()
    3387  (__v2df)  in _mm_maskz_scalef_pd()
    5442  (__v2df)  in _mm_maskz_load_pd()
    5614  (__v2df)  in _mm_maskz_loadu_pd()
    5674  (__v2df) __A,  in _mm_mask_store_pd()
    5774  (__v2df) __A,  in _mm_mask_storeu_pd()
    [all …]
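The mask_store/maskz_load matches above extend the same masking convention to memory: lane i is transferred only when mask bit i is set, and unselected destination memory is left untouched. A plain C model of a two-lane masked store (mask_storeu is an illustrative helper):

    #include <stdio.h>

    typedef double v2df __attribute__ ((__vector_size__ (16)));

    static void mask_storeu (double *p, unsigned char k, v2df a)
    {
      for (int i = 0; i < 2; i++)
        if ((k >> i) & 1)
          p[i] = a[i];          /* unselected lanes leave memory alone */
    }

    int main (void)
    {
      double buf[2] = { -1.0, -2.0 };
      v2df v = { 10.0, 20.0 };
      mask_storeu (buf, 0x2, v);           /* writes lane 1 only */
      printf ("%g %g\n", buf[0], buf[1]);  /* -1 20 */
      return 0;
    }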
avx512dqintrin.h
    1016  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
    1017  (__v2df)(__m128d)(B), \
    1023  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
    1024  (__v2df)(__m128d)(B), \
    1029  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
    1030  (__v2df)(__m128d)(B), \
    1036  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
    1037  (__v2df)(__m128d)(B), \
    1042  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
    1043  (__v2df)(__m128d)(B), \
    [all …]
avxintrin.h
    1663  ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
    1664  (__v2df)(__m128d)(b), (c)))
    1902  ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
    2539  return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);  in _mm_testz_pd()
    2568  return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);  in _mm_testc_pd()
    2598  return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);  in _mm_testnzc_pd()
    3078  return (__m256d)__builtin_shufflevector((__v2df)__b, (__v2df)__b,  in _mm256_broadcast_pd()
    3491  __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a);  in _mm_maskstore_pd()
    4502  return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1);  in _mm256_castpd128_pd256()
    4561  return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3);  in _mm256_zextpd128_pd256()
    [all …]
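The vtest matches deserve a note: VTESTPD inspects only the sign bit of each double lane. _mm_testz_pd(a, b) returns 1 when no lane has the sign bit set in both a and b; _mm_testc_pd(a, b) returns 1 when every sign bit set in b is also set in a. A sketch (requires AVX, compile with -mavx):

    #include <immintrin.h>
    #include <stdio.h>

    int main (void)
    {
      __m128d a = _mm_set_pd (-1.0,  1.0);  /* lane signs: {0, 1} */
      __m128d b = _mm_set_pd ( 2.0, -3.0);  /* lane signs: {1, 0} */
      printf ("%d %d\n",
              _mm_testz_pd (a, b),   /* 1: no lane negative in both */
              _mm_testc_pd (a, b));  /* 0: b's lane 0 sign not set in a */
      return 0;
    }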
smmintrin.h
    315  ((__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)))
    356  ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \
    385  ((__m128d)__builtin_ia32_blendpd((__v2df)(__m128d)(V1), \
    386  (__v2df)(__m128d)(V2), (int)(M)))
    439  return (__m128d)__builtin_ia32_blendvpd((__v2df)__V1, (__v2df)__V2,  in _mm_blendv_pd()
    440  (__v2df)__M);  in _mm_blendv_pd()
    631  ((__m128d)__builtin_ia32_dppd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \
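The SSE4.1 entries are macros because the rounding mode and blend selector must reach the builtin as immediate constants. A sketch of _mm_round_pd and _mm_blend_pd (compile with -msse4.1):

    #include <smmintrin.h>
    #include <stdio.h>

    int main (void)
    {
      __m128d x = _mm_set_pd (-1.5, 2.5);   /* {2.5, -1.5} */
      __m128d f = _mm_round_pd (x, _MM_FROUND_TO_NEG_INF |
                                   _MM_FROUND_NO_EXC);   /* {2.0, -2.0} */
      __m128d b = _mm_blend_pd (x, f, 0x2); /* lane 0 from x, lane 1 from f */
      printf ("%g %g / %g %g\n", f[0], f[1], b[0], b[1]);
      return 0;
    }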
avx2intrin.h
    722  return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);  in _mm_broadcastsd_pd()
    734  return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);  in _mm256_broadcastsd_pd()
    939  ((__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128d)(a), \
    942  (__v2df)(__m128d)(mask), (s)))
    951  ((__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \
    954  (__v2df)(__m128d)(mask), (s)))
    1035  ((__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
    1038  (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
    1052  ((__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
    1055  (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
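The gather macros above load each double from base + index*scale; the masked forms also take a source vector plus a mask whose lane sign bits gate the individual loads. A sketch of the unmasked form (requires AVX2, compile with -mavx2):

    #include <immintrin.h>
    #include <stdio.h>

    int main (void)
    {
      double table[8] = { 0, 10, 20, 30, 40, 50, 60, 70 };
      __m128i idx = _mm_set_epi32 (0, 0, 6, 1);      /* low two indices: 1, 6 */
      __m128d g = _mm_i32gather_pd (table, idx, 8);  /* scale 8 bytes: {10, 60} */
      printf ("%g %g\n", g[0], g[1]);
      return 0;
    }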
ammintrin.h
    160  __builtin_ia32_movntsd(__p, (__v2df)__a);  in _mm_stream_sd()
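_mm_stream_sd wraps MOVNTSD, an SSE4a non-temporal store of lane 0 that bypasses the cache, which is why it lives in the AMD-specific ammintrin.h. A sketch (compile with -msse4a; the fence orders the store before the read):

    #include <ammintrin.h>
    #include <stdio.h>

    int main (void)
    {
      double out = 0.0;
      __m128d v = _mm_set_sd (3.25);
      _mm_stream_sd (&out, v);  /* MOVNTSD: cache-bypassing store of lane 0 */
      _mm_sfence ();            /* make the NT store visible before reading */
      printf ("%g\n", out);
      return 0;
    }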
xopintrin.h
    713  ((__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \
    714  (__v2df)(__m128d)(Y), \
    740  return (__m128d)__builtin_ia32_vfrczsd((__v2df)__A);  in _mm_frcz_sd()
    752  return (__m128d)__builtin_ia32_vfrczpd((__v2df)__A);  in _mm_frcz_pd()
avx512fp16intrin.h
    1593  ((__m128h)__builtin_ia32_vcvtsd2sh_round_mask((__v8hf)(A), (__v2df)(B), \
    1599  (__v8hf)(A), (__v2df)(B), (__v8hf)(W), (__mmask8)(U), (int)(R)))
    1602  ((__m128h)__builtin_ia32_vcvtsd2sh_round_mask((__v8hf)(A), (__v2df)(B), \
    1609  (__v8hf)__A, (__v2df)__B, (__v8hf)_mm_undefined_ph(), (__mmask8)-1,  in _mm_cvtsd_sh()
    1618  (__v8hf)__A, (__v2df)__B, (__v8hf)__W, (__mmask8)__U,  in _mm_mask_cvtsd_sh()
    1625  (__v8hf)__A, (__v2df)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,  in _mm_maskz_cvtsd_sh()
    1630  ((__m128d)__builtin_ia32_vcvtsh2sd_round_mask((__v2df)(A), (__v8hf)(B), \
    1636  (__v2df)(A), (__v8hf)(B), (__v2df)(W), (__mmask8)(U), (int)(R)))
    1646  (__v2df)__A, (__v8hf)__B, (__v2df)_mm_undefined_pd(), (__mmask8)-1,  in _mm_cvtsh_sd()
    1655  (__v2df)__A, (__v8hf)__B, (__v2df)__W, (__mmask8)__U,  in _mm_mask_cvtsh_sd()
    [all …]
avx512vlfp16intrin.h
    655  (__v2df)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);  in _mm_cvtpd_ph()
    661  return (__m128h)__builtin_ia32_vcvtpd2ph128_mask((__v2df)__A, (__v8hf)__W,  in _mm_mask_cvtpd_ph()
    668  (__v2df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);  in _mm_maskz_cvtpd_ph()
    690  (__v8hf)__A, (__v2df)_mm_undefined_pd(), (__mmask8)-1);  in _mm_cvtph_pd()
    696  return (__m128d)__builtin_ia32_vcvtph2pd128_mask((__v8hf)__A, (__v2df)__W,  in _mm_mask_cvtph_pd()
    703  (__v8hf)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U);  in _mm_maskz_cvtph_pd()
/openbsd/gnu/llvm/clang/lib/Headers/ppc_wrappers/
emmintrin.h
    125  __v2df __result = (__v2df)__A;  in _mm_move_sd()
    249  return (__m128d)((__v2df)__A + (__v2df)__B);  in _mm_add_pd()
    265  return (__m128d)((__v2df)__A - (__v2df)__B);  in _mm_sub_pd()
    278  return (__m128d)((__v2df)__A * (__v2df)__B);  in _mm_mul_pd()
    291  return (__m128d)((__v2df)__A / (__v2df)__B);  in _mm_div_pd()
    381  __v2df __temp = (__v2df)vec_cmpeq((__v2df)__A, (__v2df)__B);  in _mm_cmpneq_pd()
    1088  __v2df __result = (__v2df)__A;  in _mm_cvtsi32_sd()
    1098  __v2df __result = (__v2df)__A;  in _mm_cvtsi64_sd()
    1122  __v2df __res = (__v2df)__A;  in _mm_cvtss_sd()
    1168  __v2df __result = (__v2df)__A;  in _mm_loadh_pd()
    [all …]
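These ppc_wrappers headers re-express each x86 intrinsic with AltiVec/VSX operations, with __v2df mapped onto vector double. A sketch of the translation idea, for POWER targets only (e.g. powerpc64le, compile with -mvsx; v2df stands in for the wrappers' __v2df):

    #include <altivec.h>
    #include <stdio.h>

    typedef vector double v2df;   /* the wrappers' __v2df equivalent */

    int main (void)
    {
      v2df a = { 1.0, 2.0 };
      v2df b = { 0.5, 0.25 };
      v2df s = vec_add (a, b);    /* stands in for _mm_add_pd */
      printf ("%g %g\n", s[0], s[1]);
      return 0;
    }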
pmmintrin.h
    59  const __v2df __even_n0 = {-0.0, 0.0};  in _mm_addsub_pd()
    60  __v2df __even_neg_Y = vec_xor(__Y, __even_n0);  in _mm_addsub_pd()
    93  return (__m128d)vec_add(vec_mergeh((__v2df)__X, (__v2df)__Y),  in _mm_hadd_pd()
    94  vec_mergel((__v2df)__X, (__v2df)__Y));  in _mm_hadd_pd()
    100  return (__m128d)vec_sub(vec_mergeh((__v2df)__X, (__v2df)__Y),  in _mm_hsub_pd()
    101  vec_mergel((__v2df)__X, (__v2df)__Y));  in _mm_hsub_pd()
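The {-0.0, 0.0} constant in _mm_addsub_pd above is a sign mask: XORing a lane with -0.0 flips only its sign bit, so add-subtract reduces to a plain add after negating the even lane of __Y. A portable illustration using vector extensions (addsub is an illustrative helper):

    #include <stdio.h>

    typedef double    v2df __attribute__ ((__vector_size__ (16)));
    typedef long long v2di __attribute__ ((__vector_size__ (16)));

    static v2df addsub (v2df x, v2df y)
    {
      const v2df sign0 = { -0.0, 0.0 };            /* sign bit, lane 0 only */
      v2df neg_y = (v2df)((v2di)y ^ (v2di)sign0);  /* flip lane 0's sign */
      return x + neg_y;                            /* {x0-y0, x1+y1} */
    }

    int main (void)
    {
      v2df x = { 1.0, 2.0 }, y = { 30.0, 40.0 };
      v2df r = addsub (x, y);                      /* {-29.0, 42.0} */
      printf ("%g %g\n", r[0], r[1]);
      return 0;
    }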
smmintrin.h
    58  __v2df __r;  in _mm_round_pd()
    97  __r = vec_rint((__v2df)__A);  in _mm_round_pd()
    109  __r = vec_floor((__v2df)__A);  in _mm_round_pd()
    113  __r = vec_ceil((__v2df)__A);  in _mm_round_pd()
    117  __r = vec_trunc((__v2df)__A);  in _mm_round_pd()
    120  __r = vec_rint((__v2df)__A);  in _mm_round_pd()
    142  __v2df __r = {((__v2df)__B)[0], ((__v2df)__A)[1]};  in _mm_round_sd()
    384  return (__m128d)vec_blendv((__v2df)__A, (__v2df)__B, (__v2du)__mask);  in _mm_blendv_pd()
/openbsd/gnu/usr.bin/gcc/gcc/
ChangeLog.7
    8630  * config/i386/xmmintrin.h (__v2df, __v2di, __v4si, __v8hi, __v16qi):