// REQUIRES: x86-registered-target
// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=UNCONSTRAINED --check-prefix=COMMON --check-prefix=COMMONIR
// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -ffp-exception-behavior=maytrap -DSTRICT=1 -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=CONSTRAINED --check-prefix=COMMON --check-prefix=COMMONIR
// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -S -o - -Wall -Werror | FileCheck %s --check-prefix=CHECK-ASM --check-prefix=COMMON
// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512dq -ffp-exception-behavior=maytrap -DSTRICT=1 -S -o - -Wall -Werror | FileCheck %s --check-prefix=CHECK-ASM --check-prefix=COMMON

// FIXME: Every instance of "fpexcept.maytrap" is wrong.
#ifdef STRICT
// Test that the constrained intrinsics are picking up the exception
// metadata from the AST instead of the global default from the command line.

#pragma float_control(except, on)
#endif


#include <immintrin.h>
17
// Unmasked s64 -> f64 conversion: plain sitofp normally, constrained intrinsic under STRICT.
__m512d test_mm512_cvtepi64_pd(__m512i __A) {
  // COMMON-LABEL: test_mm512_cvtepi64_pd
  // UNCONSTRAINED: sitofp <8 x i64> %{{.*}} to <8 x double>
  // CONSTRAINED: call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64> %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
  // CHECK-ASM: vcvtqq2pd
  return _mm512_cvtepi64_pd(__A);
}
25
// Merge-masked s64 -> f64 conversion: conversion followed by a mask select.
__m512d test_mm512_mask_cvtepi64_pd(__m512d __W, __mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_mask_cvtepi64_pd
  // UNCONSTRAINED: sitofp <8 x i64> %{{.*}} to <8 x double>
  // CONSTRAINED: call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64> %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
  // CHECK-ASM: vcvtqq2pd
  return _mm512_mask_cvtepi64_pd(__W, __U, __A);
}
34
// Zero-masked s64 -> f64 conversion: conversion followed by a mask select.
__m512d test_mm512_maskz_cvtepi64_pd(__mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_maskz_cvtepi64_pd
  // UNCONSTRAINED: sitofp <8 x i64> %{{.*}} to <8 x double>
  // CONSTRAINED: call <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64> %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
  // CHECK-ASM: vcvtqq2pd
  return _mm512_maskz_cvtepi64_pd(__U, __A);
}
43
// Explicit-rounding variant: always lowers to the target-specific round intrinsic.
__m512d test_mm512_cvt_roundepi64_pd(__m512i __A) {
  // COMMON-LABEL: test_mm512_cvt_roundepi64_pd
  // COMMONIR: @llvm.x86.avx512.sitofp.round.v8f64.v8i64
  // CHECK-ASM: vcvtqq2pd
  return _mm512_cvt_roundepi64_pd(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
50
// Merge-masked explicit-rounding variant: round intrinsic plus mask select.
__m512d test_mm512_mask_cvt_roundepi64_pd(__m512d __W, __mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_mask_cvt_roundepi64_pd
  // COMMONIR: @llvm.x86.avx512.sitofp.round.v8f64.v8i64
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
  // CHECK-ASM: vcvtqq2pd
  return _mm512_mask_cvt_roundepi64_pd(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
58
// Zero-masked explicit-rounding variant: round intrinsic plus mask select.
__m512d test_mm512_maskz_cvt_roundepi64_pd(__mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_maskz_cvt_roundepi64_pd
  // COMMONIR: @llvm.x86.avx512.sitofp.round.v8f64.v8i64
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
  // CHECK-ASM: vcvtqq2pd
  return _mm512_maskz_cvt_roundepi64_pd(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
66
// Unmasked s64 -> f32 conversion; STRICT mode also emits scalar constrained
// sitofp calls (see FIXME at top of file about exception metadata).
__m256 test_mm512_cvtepi64_ps(__m512i __A) {
  // COMMON-LABEL: test_mm512_cvtepi64_ps
  // UNCONSTRAINED: sitofp <8 x i64> %{{.*}} to <8 x float>
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64> %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CHECK-ASM: vcvtqq2ps
  return _mm512_cvtepi64_ps(__A);
}
82
// Merge-masked s64 -> f32 conversion: conversion followed by a mask select.
__m256 test_mm512_mask_cvtepi64_ps(__m256 __W, __mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_mask_cvtepi64_ps
  // UNCONSTRAINED: sitofp <8 x i64> %{{.*}} to <8 x float>
  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64> %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
  // CHECK-ASM: vcvtqq2ps
  return _mm512_mask_cvtepi64_ps(__W, __U, __A);
}
91
// Zero-masked s64 -> f32 conversion; STRICT mode also emits scalar constrained
// sitofp calls for the zero vector before the vector conversion and select.
__m256 test_mm512_maskz_cvtepi64_ps(__mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_maskz_cvtepi64_ps
  // UNCONSTRAINED: sitofp <8 x i64> %{{.*}} to <8 x float>
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64> %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
  // CHECK-ASM: vcvtqq2ps
  return _mm512_maskz_cvtepi64_ps(__U, __A);
}
108
// Explicit-rounding s64 -> f32 variant: lowers to the target round intrinsic.
__m256 test_mm512_cvt_roundepi64_ps(__m512i __A) {
  // COMMON-LABEL: test_mm512_cvt_roundepi64_ps
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // COMMONIR: @llvm.x86.avx512.sitofp.round.v8f32.v8i64
  // CHECK-ASM: vcvtqq2ps
  return _mm512_cvt_roundepi64_ps(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
123
// Merge-masked explicit-rounding s64 -> f32: round intrinsic plus mask select.
__m256 test_mm512_mask_cvt_roundepi64_ps(__m256 __W, __mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_mask_cvt_roundepi64_ps
  // COMMONIR: @llvm.x86.avx512.sitofp.round.v8f32.v8i64
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
  // CHECK-ASM: vcvtqq2ps
  return _mm512_mask_cvt_roundepi64_ps(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
131
// Zero-masked explicit-rounding s64 -> f32: round intrinsic plus mask select.
__m256 test_mm512_maskz_cvt_roundepi64_ps(__mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_maskz_cvt_roundepi64_ps
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // COMMONIR: @llvm.x86.avx512.sitofp.round.v8f32.v8i64
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
  // CHECK-ASM: vcvtqq2ps
  return _mm512_maskz_cvt_roundepi64_ps(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
147
// Unmasked u64 -> f64 conversion: plain uitofp normally, constrained intrinsic under STRICT.
__m512d test_mm512_cvtepu64_pd(__m512i __A) {
  // COMMON-LABEL: test_mm512_cvtepu64_pd
  // UNCONSTRAINED: uitofp <8 x i64> %{{.*}} to <8 x double>
  // CONSTRAINED: call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64> %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
  // CHECK-ASM: vcvtuqq2pd
  return _mm512_cvtepu64_pd(__A);
}
155
// Merge-masked u64 -> f64 conversion: conversion followed by a mask select.
__m512d test_mm512_mask_cvtepu64_pd(__m512d __W, __mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_mask_cvtepu64_pd
  // UNCONSTRAINED: uitofp <8 x i64> %{{.*}} to <8 x double>
  // CONSTRAINED: call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64> %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
  // CHECK-ASM: vcvtuqq2pd
  return _mm512_mask_cvtepu64_pd(__W, __U, __A);
}
164
// Zero-masked u64 -> f64 conversion: conversion followed by a mask select.
__m512d test_mm512_maskz_cvtepu64_pd(__mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_maskz_cvtepu64_pd
  // UNCONSTRAINED: uitofp <8 x i64> %{{.*}} to <8 x double>
  // CONSTRAINED: call <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64> %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
  // CHECK-ASM: vcvtuqq2pd
  return _mm512_maskz_cvtepu64_pd(__U, __A);
}
173
// Explicit-rounding u64 -> f64 variant: lowers to the target round intrinsic.
__m512d test_mm512_cvt_roundepu64_pd(__m512i __A) {
  // COMMON-LABEL: test_mm512_cvt_roundepu64_pd
  // COMMONIR: @llvm.x86.avx512.uitofp.round.v8f64.v8i64
  // CHECK-ASM: vcvtuqq2pd
  return _mm512_cvt_roundepu64_pd(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
180
// Merge-masked explicit-rounding u64 -> f64: round intrinsic plus mask select.
__m512d test_mm512_mask_cvt_roundepu64_pd(__m512d __W, __mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_mask_cvt_roundepu64_pd
  // COMMONIR: @llvm.x86.avx512.uitofp.round.v8f64.v8i64
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
  // CHECK-ASM: vcvtuqq2pd
  return _mm512_mask_cvt_roundepu64_pd(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
188
// Zero-masked explicit-rounding u64 -> f64: round intrinsic plus mask select.
__m512d test_mm512_maskz_cvt_roundepu64_pd(__mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_maskz_cvt_roundepu64_pd
  // COMMONIR: @llvm.x86.avx512.uitofp.round.v8f64.v8i64
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
  // CHECK-ASM: vcvtuqq2pd
  return _mm512_maskz_cvt_roundepu64_pd(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
196
// Unmasked u64 -> f32 conversion; STRICT mode also emits scalar constrained
// sitofp calls (see FIXME at top of file about exception metadata).
__m256 test_mm512_cvtepu64_ps(__m512i __A) {
  // COMMON-LABEL: test_mm512_cvtepu64_ps
  // UNCONSTRAINED: uitofp <8 x i64> %{{.*}} to <8 x float>
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64> %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CHECK-ASM: vcvtuqq2ps
  return _mm512_cvtepu64_ps(__A);
}
212
// Merge-masked u64 -> f32 conversion: conversion followed by a mask select.
__m256 test_mm512_mask_cvtepu64_ps(__m256 __W, __mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_mask_cvtepu64_ps
  // UNCONSTRAINED: uitofp <8 x i64> %{{.*}} to <8 x float>
  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64> %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
  // CHECK-ASM: vcvtuqq2ps
  return _mm512_mask_cvtepu64_ps(__W, __U, __A);
}
221
// Zero-masked u64 -> f32 conversion; STRICT mode also emits scalar constrained
// sitofp calls for the zero vector before the vector conversion and select.
__m256 test_mm512_maskz_cvtepu64_ps(__mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_maskz_cvtepu64_ps
  // UNCONSTRAINED: uitofp <8 x i64> %{{.*}} to <8 x float>
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64> %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
  // CHECK-ASM: vcvtuqq2ps
  return _mm512_maskz_cvtepu64_ps(__U, __A);
}
238
// Explicit-rounding u64 -> f32 variant: lowers to the target round intrinsic.
__m256 test_mm512_cvt_roundepu64_ps(__m512i __A) {
  // COMMON-LABEL: test_mm512_cvt_roundepu64_ps
  // COMMONIR: @llvm.x86.avx512.uitofp.round.v8f32.v8i64
  // CHECK-ASM: vcvtuqq2ps
  return _mm512_cvt_roundepu64_ps(__A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
245
// Merge-masked explicit-rounding u64 -> f32: round intrinsic plus mask select.
__m256 test_mm512_mask_cvt_roundepu64_ps(__m256 __W, __mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_mask_cvt_roundepu64_ps
  // COMMONIR: @llvm.x86.avx512.uitofp.round.v8f32.v8i64
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
  // CHECK-ASM: vcvtuqq2ps
  return _mm512_mask_cvt_roundepu64_ps(__W, __U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
253
// Zero-masked explicit-rounding u64 -> f32: round intrinsic plus mask select.
__m256 test_mm512_maskz_cvt_roundepu64_ps(__mmask8 __U, __m512i __A) {
  // COMMON-LABEL: test_mm512_maskz_cvt_roundepu64_ps
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CONSTRAINED: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 0, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // COMMONIR: @llvm.x86.avx512.uitofp.round.v8f32.v8i64
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
  // CHECK-ASM: vcvtuqq2ps
  return _mm512_maskz_cvt_roundepu64_ps(__U, __A, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
269
270