
Searched refs:__v4sf (Results 1 – 24 of 24) sorted by relevance
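Background note (not part of the search output): in these Clang headers __v4sf is the plain GCC/Clang vector-extension type holding four single-precision floats in one 128-bit vector, while the public __m128 type carries extra alignment/aliasing attributes. The intrinsics therefore cast to __v4sf so that ordinary operators and builtins apply lane-wise. A minimal, self-contained sketch of that pattern, with illustrative _demo names rather than the actual header definitions:

  /* illustrative sketch of the __v4sf cast pattern used by e.g. _mm_add_ps() */
  typedef float v4sf_demo __attribute__((__vector_size__(16)));
  typedef float m128_demo __attribute__((__vector_size__(16), __aligned__(16)));

  static inline m128_demo add_ps_demo(m128_demo a, m128_demo b) {
    /* cast to the plain vector type so '+' is element-wise, then cast back */
    return (m128_demo)((v4sf_demo)a + (v4sf_demo)b);
  }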

/freebsd/contrib/llvm-project/clang/lib/Headers/
xmmintrin.h
80 return (__m128)((__v4sf)__a + (__v4sf)__b); in _mm_add_ps()
123 return (__m128)((__v4sf)__a - (__v4sf)__b); in _mm_sub_ps()
165 return (__m128)((__v4sf)__a * (__v4sf)__b); in _mm_mul_ps()
206 return (__m128)((__v4sf)__a / (__v4sf)__b); in _mm_div_ps()
335 return __builtin_ia32_minss((__v4sf)__a, (__v4sf)__b); in _mm_min_ss()
354 return __builtin_ia32_minps((__v4sf)__a, (__v4sf)__b); in _mm_min_ps()
377 return __builtin_ia32_maxss((__v4sf)__a, (__v4sf)__b); in _mm_max_ss()
396 return __builtin_ia32_maxps((__v4sf)__a, (__v4sf)__b); in _mm_max_ps()
622 (__v4sf)__builtin_ia32_cmpltss((__v4sf)__b, (__v4sf)__a), in _mm_cmpgt_ss()
667 (__v4sf)__builtin_ia32_cmpless((__v4sf)__b, (__v4sf)__a), in _mm_cmpge_ss()
[all …]
fma4intrin.h
26 return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_macc_ps()
38 return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_macc_ss()
50 return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); in _mm_msub_ps()
62 return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); in _mm_msub_ss()
74 return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_nmacc_ps()
86 return (__m128)__builtin_ia32_vfmaddss(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_nmacc_ss()
98 return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); in _mm_nmsub_ps()
110 return (__m128)__builtin_ia32_vfmaddss(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); in _mm_nmsub_ss()
122 return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_maddsub_ps()
134 return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); in _mm_msubadd_ps()
fmaintrin.h
38 return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmadd_ps()
86 return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmadd_ss()
134 return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); in _mm_fmsub_ps()
182 return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); in _mm_fmsub_ss()
230 return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fnmadd_ps()
278 return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C); in _mm_fnmadd_ss()
326 return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); in _mm_fnmsub_ps()
374 return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C); in _mm_fnmsub_ss()
428 return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmaddsub_ps()
478 return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); in _mm_fmsubadd_ps()
pmmintrin.h
60 return __builtin_ia32_addsubps((__v4sf)__a, (__v4sf)__b); in _mm_addsub_ps()
83 return __builtin_ia32_haddps((__v4sf)__a, (__v4sf)__b); in _mm_hadd_ps()
106 return __builtin_ia32_hsubps((__v4sf)__a, (__v4sf)__b); in _mm_hsub_ps()
128 return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 1, 1, 3, 3); in _mm_movehdup_ps()
149 return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 2, 2); in _mm_moveldup_ps()
avx512vldqintrin.h
111 (__v4sf)__W); in _mm_mask_andnot_ps()
167 (__v4sf)__W); in _mm_mask_and_ps()
223 (__v4sf)__W); in _mm_mask_xor_ps()
279 (__v4sf)__W); in _mm_mask_or_ps()
498 (__v4sf) _mm_setzero_ps(), in _mm_cvtepi64_ps()
505 (__v4sf) __W, in _mm_mask_cvtepi64_ps()
512 (__v4sf) _mm_setzero_ps(), in _mm_maskz_cvtepi64_ps()
744 (__v4sf) _mm_setzero_ps(), in _mm_cvtepu64_ps()
751 (__v4sf) __W, in _mm_mask_cvtepu64_ps()
758 (__v4sf) _mm_setzero_ps(), in _mm_maskz_cvtepu64_ps()
[all …]
avx512erintrin.h
115 ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
116 (__v4sf)(__m128)(B), \
121 ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
122 (__v4sf)(__m128)(B), \
218 ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
219 (__v4sf)(__m128)(B), \
224 ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
225 (__v4sf)(__m128)(B), \
226 (__v4sf)(__m128)(S), \
230 ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
[all …]
avx512vlbf16intrin.h
41 return (__m128bh)__builtin_ia32_cvtne2ps2bf16_128((__v4sf) __A, in _mm_cvtne2ps_pbh()
42 (__v4sf) __B); in _mm_cvtne2ps_pbh()
166 ((__m128bh)__builtin_ia32_vcvtneps2bf16128((__v4sf)(A)))
185 return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A, in _mm_mask_cvtneps_pbh()
205 return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A, in _mm_maskz_cvtneps_pbh()
278 return (__m128)__builtin_ia32_dpbf16ps_128((__v4sf)__D, in _mm_dpbf16_ps()
303 (__v4sf)_mm_dpbf16_ps(__D, __A, __B), in _mm_mask_dpbf16_ps()
304 (__v4sf)__D); in _mm_mask_dpbf16_ps()
328 (__v4sf)_mm_setzero_si128()); in _mm_maskz_dpbf16_ps()
411 __v4sf __V = {__A, 0, 0, 0}; in _mm_cvtness_sbh()
[all …]
avx512fintrin.h
345 return (__m512)__builtin_shufflevector((__v4sf) __A, (__v4sf) __A, in _mm512_broadcastss_ps()
1633 (__v4sf) in _mm_rsqrt14_ss()
1642 (__v4sf) __B, in _mm_mask_rsqrt14_ss()
1643 (__v4sf) __W, in _mm_mask_rsqrt14_ss()
1651 (__v4sf) __B, in _mm_maskz_rsqrt14_ss()
1750 (__v4sf) __B, in _mm_mask_rcp14_ss()
1751 (__v4sf) __W, in _mm_mask_rcp14_ss()
1759 (__v4sf) __B, in _mm_maskz_rcp14_ss()
6582 (__v4sf)( __B), (__v4sf) _mm_setzero_ps(), in _mm_scalef_ss()
6828 return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A, in _mm512_broadcast_f32x4()
[all …]
avx512vlintrin.h
2003 (__v4sf) in _mm_maskz_cvtpd_ps()
2591 (__v4sf) in _mm_maskz_expandloadu_ps()
2756 (__v4sf) in _mm_getexp_ps()
2771 (__v4sf) in _mm_maskz_getexp_ps()
3429 (__v4sf) in _mm_scalef_ps()
3446 (__v4sf) in _mm_maskz_scalef_ps()
5482 (__v4sf) in _mm_maskz_load_ps()
5654 (__v4sf) in _mm_maskz_loadu_ps()
5993 (__v4sf) in _mm_rcp14_ps()
6798 return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A, in _mm256_broadcast_f32x4()
[all …]
avx512fp16intrin.h
2947 (__v4sf)__A, (__v4sf)(__B), (__v4sf)(__C), __U, _MM_FROUND_CUR_DIRECTION); in _mm_mask_fcmadd_sch()
2960 (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, __U, _MM_FROUND_CUR_DIRECTION); in _mm_mask3_fcmadd_sch()
2965 (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \
2970 (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \
2975 (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \
2980 (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \
3007 (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, __U, _MM_FROUND_CUR_DIRECTION); in _mm_mask3_fmadd_sch()
3033 (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1, in _mm_fcmul_sch()
3047 (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U, in _mm_maskz_fcmul_sch()
3069 (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1, in _mm_fmul_sch()
[all …]
smmintrin.h
240 ((__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)))
281 ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
413 ((__m128)__builtin_ia32_blendps((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \
466 return (__m128)__builtin_ia32_blendvps((__v4sf)__V1, (__v4sf)__V2, in _mm_blendv_ps()
467 (__v4sf)__M); in _mm_blendv_ps()
597 ((__m128)__builtin_ia32_dpps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (M)))
864 int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)))
870 (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); \
f16cintrin.h
42 __v4sf __r = __builtin_ia32_vcvtph2ps(__v); in _cvtsh_ss()
69 (unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
96 ((__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)))
avx512vlfp16intrin.h
350 (__mmask8)__U, (__v4sf)_mm_conj_pch(__A), (__v4sf)__W); in _mm_mask_conj_pch()
356 (__mmask8)__U, (__v4sf)_mm_conj_pch(__A), (__v4sf)_mm_setzero_ps()); in _mm_maskz_conj_pch()
1794 (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1); in _mm_fcmul_pch()
1806 (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U); in _mm_maskz_fcmul_pch()
1838 __builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)(__m128h)__B, in _mm_mask_fcmadd_pch()
1840 (__v4sf)__A); in _mm_mask_fcmadd_pch()
1852 (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); in _mm_maskz_fcmadd_pch()
1886 (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1); in _mm_fmul_pch()
1900 (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U); in _mm_maskz_fmul_pch()
1932 __builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, in _mm_mask_fmadd_pch()
[all …]
avxintrin.h
1727 ((__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
1728 (__v4sf)(__m128)(b), (c)))
1965 ((__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
2631 return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b); in _mm_testz_ps()
2660 return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b); in _mm_testc_ps()
2690 return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b); in _mm_testnzc_ps()
3111 return (__m256)__builtin_shufflevector((__v4sf)__b, (__v4sf)__b, in _mm256_broadcast_ps()
3552 __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a); in _mm_maskstore_ps()
4537 return __builtin_shufflevector((__v4sf)__a, in _mm256_castps128_ps256()
4596 return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7); in _mm256_zextps128_ps256()
[all …]
avx512dqintrin.h
981 ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
982 (__v4sf)(__m128)(B), \
987 ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
988 (__v4sf)(__m128)(B), \
993 ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
994 (__v4sf)(__m128)(B), \
1000 ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
1001 (__v4sf)(__m128)(B), \
1006 ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
1007 (__v4sf)(__m128)(B), \
[all …]
avx2intrin.h
3006 return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0); in _mm_broadcastss_ps()
3040 return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0); in _mm256_broadcastss_ps()
4192 ((__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
4195 (__v4sf)(__m128)(mask), (s)))
4291 ((__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
4294 (__v4sf)(__m128)(mask), (s)))
4339 ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
4342 (__v4sf)(__m128)(mask), (s)))
4903 ((__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
4976 ((__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
[all …]
ammintrin.h
178 __builtin_ia32_movntss((float *)__p, (__v4sf)__a); in _mm_stream_ss()
avxneconvertintrin.h
449 return (__m128bh)__builtin_ia32_vcvtneps2bf16128((__v4sf)__A); in _mm_cvtneps_avx_pbh()
xopintrin.h
723 ((__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
734 return (__m128)__builtin_ia32_vfrczss((__v4sf)__A); in _mm_frcz_ss()
746 return (__m128)__builtin_ia32_vfrczps((__v4sf)__A); in _mm_frcz_ps()
emmintrin.h
1278 __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df); in _mm_cvtps_pd()
1355 return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b); in _mm_cvtsd_ss()
3251 return (__m128) __builtin_convertvector((__v4si)__a, __v4sf); in _mm_cvtepi32_ps()
3265 return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a); in _mm_cvtps_epi32()
3280 return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a); in _mm_cvttps_epi32()
/freebsd/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/
xmmintrin.h
150 vec_st((__v4sf)__A, 0, (__v4sf *)__P); in _mm_store_ps()
178 __v4sf __va = vec_splat((__v4sf)__A, 0); in _mm_store1_ps()
201 return (vec_sel((__v4sf)__A, (__v4sf)__B, __mask)); in _mm_move_ss()
330 return (__m128)((__v4sf)__A + (__v4sf)__B); in _mm_add_ps()
336 return (__m128)((__v4sf)__A - (__v4sf)__B); in _mm_sub_ps()
342 return (__m128)((__v4sf)__A * (__v4sf)__B); in _mm_mul_ps()
348 return (__m128)((__v4sf)__A / (__v4sf)__B); in _mm_div_ps()
453 return ((__m128)vec_and((__v4sf)__A, (__v4sf)__B)); in _mm_and_ps()
466 return ((__m128)vec_or((__v4sf)__A, (__v4sf)__B)); in _mm_or_ps()
472 return ((__m128)vec_xor((__v4sf)__A, (__v4sf)__B)); in _mm_xor_ps()
[all …]
pmmintrin.h
51 const __v4sf __even_n0 = {-0.0, 0.0, -0.0, 0.0}; in _mm_addsub_ps()
52 __v4sf __even_neg_Y = vec_xor(__Y, __even_n0); in _mm_addsub_ps()
73 return (__m128)vec_add(vec_perm((__v4sf)__X, (__v4sf)__Y, __xform2), in _mm_hadd_ps()
74 vec_perm((__v4sf)__X, (__v4sf)__Y, __xform1)); in _mm_hadd_ps()
86 return (__m128)vec_sub(vec_perm((__v4sf)__X, (__v4sf)__Y, __xform2), in _mm_hsub_ps()
87 vec_perm((__v4sf)__X, (__v4sf)__Y, __xform1)); in _mm_hsub_ps()
smmintrin.h
159 __v4sf __r; in _mm_round_ps()
203 __r = vec_rint((__v4sf)__A); in _mm_round_ps()
215 __r = vec_floor((__v4sf)__A); in _mm_round_ps()
219 __r = vec_ceil((__v4sf)__A); in _mm_round_ps()
223 __r = vec_trunc((__v4sf)__A); in _mm_round_ps()
226 __r = vec_rint((__v4sf)__A); in _mm_round_ps()
253 __v4sf __r = (__v4sf)__A; in _mm_round_ss()
254 __r[0] = ((__v4sf)__B)[0]; in _mm_round_ss()
379 return (__m128)vec_blendv((__v4sf)__A, (__v4sf)__B, (__v4su)__mask); in _mm_blendv_ps()
emmintrin.h
881 __v4sf __result; in _mm_cvtpd_ps()
967 __v4sf __rounded; in _mm_cvtps_epi32()
970 __rounded = vec_rint((__v4sf)__A); in _mm_cvtps_epi32()
980 __result = vec_cts((__v4sf)__A, 0); in _mm_cvttps_epi32()
989 return (__m128d)vec_doubleh((__v4sf)__A); in _mm_cvtps_pd()
993 __v4sf __a = (__v4sf)__A; in _mm_cvtps_pd()
994 __v4sf __temp; in _mm_cvtps_pd()
1067 __v4sf __result = (__v4sf)__A; in _mm_cvtsd_ss()
1070 __v4sf __temp_s; in _mm_cvtsd_ss()
1117 __v4sf __temp = vec_splat((__v4sf)__B, 0); in _mm_cvtss_sd()
[all …]
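Background note (not part of the search output): in the ppc_wrappers headers above, __v4sf is the same four-float vector but defined on top of the AltiVec/VSX vector float type, so the x86 intrinsics are emulated with vec_* operations rather than x86 builtins. An illustrative sketch of that pattern, assuming the AltiVec typedef and using demo names rather than the actual header code:

  /* compile for PowerPC with -maltivec / -mvsx; names are illustrative */
  #include <altivec.h>
  typedef __vector float v4sf_demo;   /* stands in for the wrappers' __v4sf */

  static inline v4sf_demo add_ps_ppc_demo(v4sf_demo a, v4sf_demo b) {
    return vec_add(a, b);             /* element-wise add, as in _mm_add_ps() */
  }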