1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -O3 -mtriple=i686-pc-linux -mattr=+cmov < %s | FileCheck %s --check-prefix=X87
3; RUN: llc -O3 -mtriple=i686-pc-linux -mattr=sse2 < %s | FileCheck %s --check-prefix=X86-SSE
4; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefix=SSE
5; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck %s --check-prefixes=AVX,AVX1
6; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512f < %s | FileCheck %s --check-prefixes=AVX,AVX512
7; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512dq < %s | FileCheck %s --check-prefixes=AVX,AVX512
8
9; Verify that constants aren't folded to inexact results when the rounding mode
10; is unknown.
11;
12; double f1() {
13;   // Because 0.1 cannot be represented exactly, this shouldn't be folded.
14;   return 1.0/10.0;
15; }
16;
17define double @f1() #0 {
18; X87-LABEL: f1:
19; X87:       # %bb.0: # %entry
20; X87-NEXT:    fld1
21; X87-NEXT:    fdivs {{\.?LCPI[0-9]+_[0-9]+}}
22; X87-NEXT:    wait
23; X87-NEXT:    retl
24;
25; X86-SSE-LABEL: f1:
26; X86-SSE:       # %bb.0: # %entry
27; X86-SSE-NEXT:    subl $12, %esp
28; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
29; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
30; X86-SSE-NEXT:    divsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
31; X86-SSE-NEXT:    movsd %xmm0, (%esp)
32; X86-SSE-NEXT:    fldl (%esp)
33; X86-SSE-NEXT:    wait
34; X86-SSE-NEXT:    addl $12, %esp
35; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
36; X86-SSE-NEXT:    retl
37;
38; SSE-LABEL: f1:
39; SSE:       # %bb.0: # %entry
40; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
41; SSE-NEXT:    divsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
42; SSE-NEXT:    retq
43;
44; AVX-LABEL: f1:
45; AVX:       # %bb.0: # %entry
46; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
47; AVX-NEXT:    vdivsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
48; AVX-NEXT:    retq
49entry:
50  %div = call double @llvm.experimental.constrained.fdiv.f64(
51                                               double 1.000000e+00,
52                                               double 1.000000e+01,
53                                               metadata !"round.dynamic",
54                                               metadata !"fpexcept.strict") #0
55  ret double %div
56}
57
58; Verify that 'a - 0' isn't simplified to 'a' when the rounding mode is unknown.
59;
60; double f2(double a) {
61;   // Because the result of '0 - 0' is negative zero if rounding mode is
62;   // downward, this shouldn't be simplified.
63;   return a - 0;
64; }
65;
66define double @f2(double %a) #0 {
67; X87-LABEL: f2:
68; X87:       # %bb.0: # %entry
69; X87-NEXT:    fldz
70; X87-NEXT:    fsubrl {{[0-9]+}}(%esp)
71; X87-NEXT:    wait
72; X87-NEXT:    retl
73;
74; X86-SSE-LABEL: f2:
75; X86-SSE:       # %bb.0: # %entry
76; X86-SSE-NEXT:    subl $12, %esp
77; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
78; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
79; X86-SSE-NEXT:    xorpd %xmm1, %xmm1
80; X86-SSE-NEXT:    subsd %xmm1, %xmm0
81; X86-SSE-NEXT:    movsd %xmm0, (%esp)
82; X86-SSE-NEXT:    fldl (%esp)
83; X86-SSE-NEXT:    wait
84; X86-SSE-NEXT:    addl $12, %esp
85; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
86; X86-SSE-NEXT:    retl
87;
88; SSE-LABEL: f2:
89; SSE:       # %bb.0: # %entry
90; SSE-NEXT:    xorpd %xmm1, %xmm1
91; SSE-NEXT:    subsd %xmm1, %xmm0
92; SSE-NEXT:    retq
93;
94; AVX-LABEL: f2:
95; AVX:       # %bb.0: # %entry
96; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
97; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
98; AVX-NEXT:    retq
99entry:
100  %sub = call double @llvm.experimental.constrained.fsub.f64(
101                                               double %a,
102                                               double 0.000000e+00,
103                                               metadata !"round.dynamic",
104                                               metadata !"fpexcept.strict") #0
105  ret double %sub
106}
107
108; Verify that '-((-a)*b)' isn't simplified to 'a*b' when the rounding mode is
109; unknown.
110;
111; double f3(double a, double b) {
112;   // Because the intermediate value involved in this calculation may require
113;   // rounding, this shouldn't be simplified.
114;   return -((-a)*b);
115; }
116;
117define double @f3(double %a, double %b) #0 {
118; X87-LABEL: f3:
119; X87:       # %bb.0: # %entry
120; X87-NEXT:    fldz
121; X87-NEXT:    fchs
122; X87-NEXT:    fld %st(0)
123; X87-NEXT:    fsubl {{[0-9]+}}(%esp)
124; X87-NEXT:    fmull {{[0-9]+}}(%esp)
125; X87-NEXT:    fsubrp %st, %st(1)
126; X87-NEXT:    wait
127; X87-NEXT:    retl
128;
129; X86-SSE-LABEL: f3:
130; X86-SSE:       # %bb.0: # %entry
131; X86-SSE-NEXT:    subl $12, %esp
132; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
133; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
134; X86-SSE-NEXT:    movapd %xmm0, %xmm1
135; X86-SSE-NEXT:    subsd {{[0-9]+}}(%esp), %xmm1
136; X86-SSE-NEXT:    mulsd {{[0-9]+}}(%esp), %xmm1
137; X86-SSE-NEXT:    subsd %xmm1, %xmm0
138; X86-SSE-NEXT:    movsd %xmm0, (%esp)
139; X86-SSE-NEXT:    fldl (%esp)
140; X86-SSE-NEXT:    wait
141; X86-SSE-NEXT:    addl $12, %esp
142; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
143; X86-SSE-NEXT:    retl
144;
145; SSE-LABEL: f3:
146; SSE:       # %bb.0: # %entry
147; SSE-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
148; SSE-NEXT:    movapd %xmm2, %xmm3
149; SSE-NEXT:    subsd %xmm0, %xmm3
150; SSE-NEXT:    mulsd %xmm1, %xmm3
151; SSE-NEXT:    subsd %xmm3, %xmm2
152; SSE-NEXT:    movapd %xmm2, %xmm0
153; SSE-NEXT:    retq
154;
155; AVX-LABEL: f3:
156; AVX:       # %bb.0: # %entry
157; AVX-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
158; AVX-NEXT:    vsubsd %xmm0, %xmm2, %xmm0
159; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
160; AVX-NEXT:    vsubsd %xmm0, %xmm2, %xmm0
161; AVX-NEXT:    retq
162entry:
163  %sub = call double @llvm.experimental.constrained.fsub.f64(
164                                               double -0.000000e+00, double %a,
165                                               metadata !"round.dynamic",
166                                               metadata !"fpexcept.strict") #0
167  %mul = call double @llvm.experimental.constrained.fmul.f64(
168                                               double %sub, double %b,
169                                               metadata !"round.dynamic",
170                                               metadata !"fpexcept.strict") #0
171  %ret = call double @llvm.experimental.constrained.fsub.f64(
172                                               double -0.000000e+00,
173                                               double %mul,
174                                               metadata !"round.dynamic",
175                                               metadata !"fpexcept.strict") #0
176  ret double %ret
177}
178
179; Verify that FP operations are not performed speculatively when FP exceptions
180; are not being ignored.
181;
182; double f4(int n, double a) {
183;   // Because a + 1 may overflow, this should not be simplified.
184;   if (n > 0)
185;     return a + 1.0;
186;   return a;
187; }
188;
189;
190define double @f4(i32 %n, double %a) #0 {
191; X87-LABEL: f4:
192; X87:       # %bb.0: # %entry
193; X87-NEXT:    fldl {{[0-9]+}}(%esp)
194; X87-NEXT:    wait
195; X87-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
196; X87-NEXT:    jle .LBB3_2
197; X87-NEXT:  # %bb.1: # %if.then
198; X87-NEXT:    fld1
199; X87-NEXT:    faddp %st, %st(1)
200; X87-NEXT:    wait
201; X87-NEXT:  .LBB3_2: # %if.end
202; X87-NEXT:    retl
203;
204; X86-SSE-LABEL: f4:
205; X86-SSE:       # %bb.0: # %entry
206; X86-SSE-NEXT:    subl $12, %esp
207; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
208; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
209; X86-SSE-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
210; X86-SSE-NEXT:    jle .LBB3_2
211; X86-SSE-NEXT:  # %bb.1: # %if.then
212; X86-SSE-NEXT:    addsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
213; X86-SSE-NEXT:  .LBB3_2: # %if.end
214; X86-SSE-NEXT:    movsd %xmm0, (%esp)
215; X86-SSE-NEXT:    fldl (%esp)
216; X86-SSE-NEXT:    wait
217; X86-SSE-NEXT:    addl $12, %esp
218; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
219; X86-SSE-NEXT:    retl
220;
221; SSE-LABEL: f4:
222; SSE:       # %bb.0: # %entry
223; SSE-NEXT:    testl %edi, %edi
224; SSE-NEXT:    jle .LBB3_2
225; SSE-NEXT:  # %bb.1: # %if.then
226; SSE-NEXT:    addsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
227; SSE-NEXT:  .LBB3_2: # %if.end
228; SSE-NEXT:    retq
229;
230; AVX-LABEL: f4:
231; AVX:       # %bb.0: # %entry
232; AVX-NEXT:    testl %edi, %edi
233; AVX-NEXT:    jle .LBB3_2
234; AVX-NEXT:  # %bb.1: # %if.then
235; AVX-NEXT:    vaddsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
236; AVX-NEXT:  .LBB3_2: # %if.end
237; AVX-NEXT:    retq
238entry:
239  %cmp = icmp sgt i32 %n, 0
240  br i1 %cmp, label %if.then, label %if.end
241
242if.then:
243  %add = call double @llvm.experimental.constrained.fadd.f64(
244                                               double 1.000000e+00, double %a,
245                                               metadata !"round.dynamic",
246                                               metadata !"fpexcept.strict") #0
247  br label %if.end
248
249if.end:
250  %a.0 = phi double [%add, %if.then], [ %a, %entry ]
251  ret double %a.0
252}
253
254; Verify that sqrt(42.0) isn't simplified when the rounding mode is unknown.
255define double @f5() #0 {
256; X87-LABEL: f5:
257; X87:       # %bb.0: # %entry
258; X87-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
259; X87-NEXT:    fsqrt
260; X87-NEXT:    wait
261; X87-NEXT:    retl
262;
263; X86-SSE-LABEL: f5:
264; X86-SSE:       # %bb.0: # %entry
265; X86-SSE-NEXT:    subl $12, %esp
266; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
267; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
268; X86-SSE-NEXT:    sqrtsd %xmm0, %xmm0
269; X86-SSE-NEXT:    movsd %xmm0, (%esp)
270; X86-SSE-NEXT:    fldl (%esp)
271; X86-SSE-NEXT:    wait
272; X86-SSE-NEXT:    addl $12, %esp
273; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
274; X86-SSE-NEXT:    retl
275;
276; SSE-LABEL: f5:
277; SSE:       # %bb.0: # %entry
278; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
279; SSE-NEXT:    sqrtsd %xmm0, %xmm0
280; SSE-NEXT:    retq
281;
282; AVX-LABEL: f5:
283; AVX:       # %bb.0: # %entry
284; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
285; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
286; AVX-NEXT:    retq
287entry:
288  %result = call double @llvm.experimental.constrained.sqrt.f64(double 42.0,
289                                               metadata !"round.dynamic",
290                                               metadata !"fpexcept.strict") #0
291  ret double %result
292}
293
294; Verify that pow(42.1, 3.0) isn't simplified when the rounding mode is unknown.
295define double @f6() #0 {
296; X87-LABEL: f6:
297; X87:       # %bb.0: # %entry
298; X87-NEXT:    subl $28, %esp
299; X87-NEXT:    .cfi_def_cfa_offset 32
300; X87-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
301; X87-NEXT:    fstpl {{[0-9]+}}(%esp)
302; X87-NEXT:    fldl {{\.?LCPI[0-9]+_[0-9]+}}
303; X87-NEXT:    fstpl (%esp)
304; X87-NEXT:    wait
305; X87-NEXT:    calll pow
306; X87-NEXT:    addl $28, %esp
307; X87-NEXT:    .cfi_def_cfa_offset 4
308; X87-NEXT:    retl
309;
310; X86-SSE-LABEL: f6:
311; X86-SSE:       # %bb.0: # %entry
312; X86-SSE-NEXT:    subl $28, %esp
313; X86-SSE-NEXT:    .cfi_def_cfa_offset 32
314; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
315; X86-SSE-NEXT:    movsd %xmm0, {{[0-9]+}}(%esp)
316; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
317; X86-SSE-NEXT:    movsd %xmm0, (%esp)
318; X86-SSE-NEXT:    calll pow
319; X86-SSE-NEXT:    addl $28, %esp
320; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
321; X86-SSE-NEXT:    retl
322;
323; SSE-LABEL: f6:
324; SSE:       # %bb.0: # %entry
325; SSE-NEXT:    pushq %rax
326; SSE-NEXT:    .cfi_def_cfa_offset 16
327; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
328; SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
329; SSE-NEXT:    callq pow@PLT
330; SSE-NEXT:    popq %rax
331; SSE-NEXT:    .cfi_def_cfa_offset 8
332; SSE-NEXT:    retq
333;
334; AVX-LABEL: f6:
335; AVX:       # %bb.0: # %entry
336; AVX-NEXT:    pushq %rax
337; AVX-NEXT:    .cfi_def_cfa_offset 16
338; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
339; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
340; AVX-NEXT:    callq pow@PLT
341; AVX-NEXT:    popq %rax
342; AVX-NEXT:    .cfi_def_cfa_offset 8
343; AVX-NEXT:    retq
344entry:
345  %result = call double @llvm.experimental.constrained.pow.f64(double 42.1,
346                                               double 3.0,
347                                               metadata !"round.dynamic",
348                                               metadata !"fpexcept.strict") #0
349  ret double %result
350}
351
352; Verify that powi(42.1, 3) isn't simplified when the rounding mode is unknown.
353define double @f7() #0 {
354; X87-LABEL: f7:
355; X87:       # %bb.0: # %entry
356; X87-NEXT:    subl $12, %esp
357; X87-NEXT:    .cfi_def_cfa_offset 16
358; X87-NEXT:    fldl {{\.?LCPI[0-9]+_[0-9]+}}
359; X87-NEXT:    fstpl (%esp)
360; X87-NEXT:    wait
361; X87-NEXT:    movl $3, {{[0-9]+}}(%esp)
362; X87-NEXT:    calll __powidf2
363; X87-NEXT:    addl $12, %esp
364; X87-NEXT:    .cfi_def_cfa_offset 4
365; X87-NEXT:    retl
366;
367; X86-SSE-LABEL: f7:
368; X86-SSE:       # %bb.0: # %entry
369; X86-SSE-NEXT:    subl $12, %esp
370; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
371; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
372; X86-SSE-NEXT:    movsd %xmm0, (%esp)
373; X86-SSE-NEXT:    movl $3, {{[0-9]+}}(%esp)
374; X86-SSE-NEXT:    calll __powidf2
375; X86-SSE-NEXT:    addl $12, %esp
376; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
377; X86-SSE-NEXT:    retl
378;
379; SSE-LABEL: f7:
380; SSE:       # %bb.0: # %entry
381; SSE-NEXT:    pushq %rax
382; SSE-NEXT:    .cfi_def_cfa_offset 16
383; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
384; SSE-NEXT:    movl $3, %edi
385; SSE-NEXT:    callq __powidf2@PLT
386; SSE-NEXT:    popq %rax
387; SSE-NEXT:    .cfi_def_cfa_offset 8
388; SSE-NEXT:    retq
389;
390; AVX-LABEL: f7:
391; AVX:       # %bb.0: # %entry
392; AVX-NEXT:    pushq %rax
393; AVX-NEXT:    .cfi_def_cfa_offset 16
394; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
395; AVX-NEXT:    movl $3, %edi
396; AVX-NEXT:    callq __powidf2@PLT
397; AVX-NEXT:    popq %rax
398; AVX-NEXT:    .cfi_def_cfa_offset 8
399; AVX-NEXT:    retq
400entry:
401  %result = call double @llvm.experimental.constrained.powi.f64(double 42.1,
402                                               i32 3,
403                                               metadata !"round.dynamic",
404                                               metadata !"fpexcept.strict") #0
405  ret double %result
406}
407
408; Verify that sin(42.0) isn't simplified when the rounding mode is unknown.
409define double @f8() #0 {
410; X87-LABEL: f8:
411; X87:       # %bb.0: # %entry
412; X87-NEXT:    subl $12, %esp
413; X87-NEXT:    .cfi_def_cfa_offset 16
414; X87-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
415; X87-NEXT:    fstpl (%esp)
416; X87-NEXT:    wait
417; X87-NEXT:    calll sin
418; X87-NEXT:    addl $12, %esp
419; X87-NEXT:    .cfi_def_cfa_offset 4
420; X87-NEXT:    retl
421;
422; X86-SSE-LABEL: f8:
423; X86-SSE:       # %bb.0: # %entry
424; X86-SSE-NEXT:    subl $12, %esp
425; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
426; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
427; X86-SSE-NEXT:    movsd %xmm0, (%esp)
428; X86-SSE-NEXT:    calll sin
429; X86-SSE-NEXT:    addl $12, %esp
430; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
431; X86-SSE-NEXT:    retl
432;
433; SSE-LABEL: f8:
434; SSE:       # %bb.0: # %entry
435; SSE-NEXT:    pushq %rax
436; SSE-NEXT:    .cfi_def_cfa_offset 16
437; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
438; SSE-NEXT:    callq sin@PLT
439; SSE-NEXT:    popq %rax
440; SSE-NEXT:    .cfi_def_cfa_offset 8
441; SSE-NEXT:    retq
442;
443; AVX-LABEL: f8:
444; AVX:       # %bb.0: # %entry
445; AVX-NEXT:    pushq %rax
446; AVX-NEXT:    .cfi_def_cfa_offset 16
447; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
448; AVX-NEXT:    callq sin@PLT
449; AVX-NEXT:    popq %rax
450; AVX-NEXT:    .cfi_def_cfa_offset 8
451; AVX-NEXT:    retq
452entry:
453  %result = call double @llvm.experimental.constrained.sin.f64(double 42.0,
454                                               metadata !"round.dynamic",
455                                               metadata !"fpexcept.strict") #0
456  ret double %result
457}
458
459; Verify that cos(42.0) isn't simplified when the rounding mode is unknown.
460define double @f9() #0 {
461; X87-LABEL: f9:
462; X87:       # %bb.0: # %entry
463; X87-NEXT:    subl $12, %esp
464; X87-NEXT:    .cfi_def_cfa_offset 16
465; X87-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
466; X87-NEXT:    fstpl (%esp)
467; X87-NEXT:    wait
468; X87-NEXT:    calll cos
469; X87-NEXT:    addl $12, %esp
470; X87-NEXT:    .cfi_def_cfa_offset 4
471; X87-NEXT:    retl
472;
473; X86-SSE-LABEL: f9:
474; X86-SSE:       # %bb.0: # %entry
475; X86-SSE-NEXT:    subl $12, %esp
476; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
477; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
478; X86-SSE-NEXT:    movsd %xmm0, (%esp)
479; X86-SSE-NEXT:    calll cos
480; X86-SSE-NEXT:    addl $12, %esp
481; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
482; X86-SSE-NEXT:    retl
483;
484; SSE-LABEL: f9:
485; SSE:       # %bb.0: # %entry
486; SSE-NEXT:    pushq %rax
487; SSE-NEXT:    .cfi_def_cfa_offset 16
488; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
489; SSE-NEXT:    callq cos@PLT
490; SSE-NEXT:    popq %rax
491; SSE-NEXT:    .cfi_def_cfa_offset 8
492; SSE-NEXT:    retq
493;
494; AVX-LABEL: f9:
495; AVX:       # %bb.0: # %entry
496; AVX-NEXT:    pushq %rax
497; AVX-NEXT:    .cfi_def_cfa_offset 16
498; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
499; AVX-NEXT:    callq cos@PLT
500; AVX-NEXT:    popq %rax
501; AVX-NEXT:    .cfi_def_cfa_offset 8
502; AVX-NEXT:    retq
503entry:
504  %result = call double @llvm.experimental.constrained.cos.f64(double 42.0,
505                                               metadata !"round.dynamic",
506                                               metadata !"fpexcept.strict") #0
507  ret double %result
508}
509
510; Verify that exp(42.0) isn't simplified when the rounding mode is unknown.
511define double @f10() #0 {
512; X87-LABEL: f10:
513; X87:       # %bb.0: # %entry
514; X87-NEXT:    subl $12, %esp
515; X87-NEXT:    .cfi_def_cfa_offset 16
516; X87-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
517; X87-NEXT:    fstpl (%esp)
518; X87-NEXT:    wait
519; X87-NEXT:    calll exp
520; X87-NEXT:    addl $12, %esp
521; X87-NEXT:    .cfi_def_cfa_offset 4
522; X87-NEXT:    retl
523;
524; X86-SSE-LABEL: f10:
525; X86-SSE:       # %bb.0: # %entry
526; X86-SSE-NEXT:    subl $12, %esp
527; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
528; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
529; X86-SSE-NEXT:    movsd %xmm0, (%esp)
530; X86-SSE-NEXT:    calll exp
531; X86-SSE-NEXT:    addl $12, %esp
532; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
533; X86-SSE-NEXT:    retl
534;
535; SSE-LABEL: f10:
536; SSE:       # %bb.0: # %entry
537; SSE-NEXT:    pushq %rax
538; SSE-NEXT:    .cfi_def_cfa_offset 16
539; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
540; SSE-NEXT:    callq exp@PLT
541; SSE-NEXT:    popq %rax
542; SSE-NEXT:    .cfi_def_cfa_offset 8
543; SSE-NEXT:    retq
544;
545; AVX-LABEL: f10:
546; AVX:       # %bb.0: # %entry
547; AVX-NEXT:    pushq %rax
548; AVX-NEXT:    .cfi_def_cfa_offset 16
549; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
550; AVX-NEXT:    callq exp@PLT
551; AVX-NEXT:    popq %rax
552; AVX-NEXT:    .cfi_def_cfa_offset 8
553; AVX-NEXT:    retq
554entry:
555  %result = call double @llvm.experimental.constrained.exp.f64(double 42.0,
556                                               metadata !"round.dynamic",
557                                               metadata !"fpexcept.strict") #0
558  ret double %result
559}
560
561; Verify that exp2(42.1) isn't simplified when the rounding mode is unknown.
562define double @f11() #0 {
563; X87-LABEL: f11:
564; X87:       # %bb.0: # %entry
565; X87-NEXT:    subl $12, %esp
566; X87-NEXT:    .cfi_def_cfa_offset 16
567; X87-NEXT:    fldl {{\.?LCPI[0-9]+_[0-9]+}}
568; X87-NEXT:    fstpl (%esp)
569; X87-NEXT:    wait
570; X87-NEXT:    calll exp2
571; X87-NEXT:    addl $12, %esp
572; X87-NEXT:    .cfi_def_cfa_offset 4
573; X87-NEXT:    retl
574;
575; X86-SSE-LABEL: f11:
576; X86-SSE:       # %bb.0: # %entry
577; X86-SSE-NEXT:    subl $12, %esp
578; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
579; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
580; X86-SSE-NEXT:    movsd %xmm0, (%esp)
581; X86-SSE-NEXT:    calll exp2
582; X86-SSE-NEXT:    addl $12, %esp
583; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
584; X86-SSE-NEXT:    retl
585;
586; SSE-LABEL: f11:
587; SSE:       # %bb.0: # %entry
588; SSE-NEXT:    pushq %rax
589; SSE-NEXT:    .cfi_def_cfa_offset 16
590; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
591; SSE-NEXT:    callq exp2@PLT
592; SSE-NEXT:    popq %rax
593; SSE-NEXT:    .cfi_def_cfa_offset 8
594; SSE-NEXT:    retq
595;
596; AVX-LABEL: f11:
597; AVX:       # %bb.0: # %entry
598; AVX-NEXT:    pushq %rax
599; AVX-NEXT:    .cfi_def_cfa_offset 16
600; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
601; AVX-NEXT:    callq exp2@PLT
602; AVX-NEXT:    popq %rax
603; AVX-NEXT:    .cfi_def_cfa_offset 8
604; AVX-NEXT:    retq
605entry:
606  %result = call double @llvm.experimental.constrained.exp2.f64(double 42.1,
607                                               metadata !"round.dynamic",
608                                               metadata !"fpexcept.strict") #0
609  ret double %result
610}
611
612; Verify that log(42.0) isn't simplified when the rounding mode is unknown.
613define double @f12() #0 {
614; X87-LABEL: f12:
615; X87:       # %bb.0: # %entry
616; X87-NEXT:    subl $12, %esp
617; X87-NEXT:    .cfi_def_cfa_offset 16
618; X87-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
619; X87-NEXT:    fstpl (%esp)
620; X87-NEXT:    wait
621; X87-NEXT:    calll log
622; X87-NEXT:    addl $12, %esp
623; X87-NEXT:    .cfi_def_cfa_offset 4
624; X87-NEXT:    retl
625;
626; X86-SSE-LABEL: f12:
627; X86-SSE:       # %bb.0: # %entry
628; X86-SSE-NEXT:    subl $12, %esp
629; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
630; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
631; X86-SSE-NEXT:    movsd %xmm0, (%esp)
632; X86-SSE-NEXT:    calll log
633; X86-SSE-NEXT:    addl $12, %esp
634; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
635; X86-SSE-NEXT:    retl
636;
637; SSE-LABEL: f12:
638; SSE:       # %bb.0: # %entry
639; SSE-NEXT:    pushq %rax
640; SSE-NEXT:    .cfi_def_cfa_offset 16
641; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
642; SSE-NEXT:    callq log@PLT
643; SSE-NEXT:    popq %rax
644; SSE-NEXT:    .cfi_def_cfa_offset 8
645; SSE-NEXT:    retq
646;
647; AVX-LABEL: f12:
648; AVX:       # %bb.0: # %entry
649; AVX-NEXT:    pushq %rax
650; AVX-NEXT:    .cfi_def_cfa_offset 16
651; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
652; AVX-NEXT:    callq log@PLT
653; AVX-NEXT:    popq %rax
654; AVX-NEXT:    .cfi_def_cfa_offset 8
655; AVX-NEXT:    retq
656entry:
657  %result = call double @llvm.experimental.constrained.log.f64(double 42.0,
658                                               metadata !"round.dynamic",
659                                               metadata !"fpexcept.strict") #0
660  ret double %result
661}
662
663; Verify that log10(42.0) isn't simplified when the rounding mode is unknown.
664define double @f13() #0 {
665; X87-LABEL: f13:
666; X87:       # %bb.0: # %entry
667; X87-NEXT:    subl $12, %esp
668; X87-NEXT:    .cfi_def_cfa_offset 16
669; X87-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
670; X87-NEXT:    fstpl (%esp)
671; X87-NEXT:    wait
672; X87-NEXT:    calll log10
673; X87-NEXT:    addl $12, %esp
674; X87-NEXT:    .cfi_def_cfa_offset 4
675; X87-NEXT:    retl
676;
677; X86-SSE-LABEL: f13:
678; X86-SSE:       # %bb.0: # %entry
679; X86-SSE-NEXT:    subl $12, %esp
680; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
681; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
682; X86-SSE-NEXT:    movsd %xmm0, (%esp)
683; X86-SSE-NEXT:    calll log10
684; X86-SSE-NEXT:    addl $12, %esp
685; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
686; X86-SSE-NEXT:    retl
687;
688; SSE-LABEL: f13:
689; SSE:       # %bb.0: # %entry
690; SSE-NEXT:    pushq %rax
691; SSE-NEXT:    .cfi_def_cfa_offset 16
692; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
693; SSE-NEXT:    callq log10@PLT
694; SSE-NEXT:    popq %rax
695; SSE-NEXT:    .cfi_def_cfa_offset 8
696; SSE-NEXT:    retq
697;
698; AVX-LABEL: f13:
699; AVX:       # %bb.0: # %entry
700; AVX-NEXT:    pushq %rax
701; AVX-NEXT:    .cfi_def_cfa_offset 16
702; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
703; AVX-NEXT:    callq log10@PLT
704; AVX-NEXT:    popq %rax
705; AVX-NEXT:    .cfi_def_cfa_offset 8
706; AVX-NEXT:    retq
707entry:
708  %result = call double @llvm.experimental.constrained.log10.f64(double 42.0,
709                                               metadata !"round.dynamic",
710                                               metadata !"fpexcept.strict") #0
711  ret double %result
712}
713
714; Verify that log2(42.0) isn't simplified when the rounding mode is unknown.
715define double @f14() #0 {
716; X87-LABEL: f14:
717; X87:       # %bb.0: # %entry
718; X87-NEXT:    subl $12, %esp
719; X87-NEXT:    .cfi_def_cfa_offset 16
720; X87-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
721; X87-NEXT:    fstpl (%esp)
722; X87-NEXT:    wait
723; X87-NEXT:    calll log2
724; X87-NEXT:    addl $12, %esp
725; X87-NEXT:    .cfi_def_cfa_offset 4
726; X87-NEXT:    retl
727;
728; X86-SSE-LABEL: f14:
729; X86-SSE:       # %bb.0: # %entry
730; X86-SSE-NEXT:    subl $12, %esp
731; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
732; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
733; X86-SSE-NEXT:    movsd %xmm0, (%esp)
734; X86-SSE-NEXT:    calll log2
735; X86-SSE-NEXT:    addl $12, %esp
736; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
737; X86-SSE-NEXT:    retl
738;
739; SSE-LABEL: f14:
740; SSE:       # %bb.0: # %entry
741; SSE-NEXT:    pushq %rax
742; SSE-NEXT:    .cfi_def_cfa_offset 16
743; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
744; SSE-NEXT:    callq log2@PLT
745; SSE-NEXT:    popq %rax
746; SSE-NEXT:    .cfi_def_cfa_offset 8
747; SSE-NEXT:    retq
748;
749; AVX-LABEL: f14:
750; AVX:       # %bb.0: # %entry
751; AVX-NEXT:    pushq %rax
752; AVX-NEXT:    .cfi_def_cfa_offset 16
753; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
754; AVX-NEXT:    callq log2@PLT
755; AVX-NEXT:    popq %rax
756; AVX-NEXT:    .cfi_def_cfa_offset 8
757; AVX-NEXT:    retq
758entry:
759  %result = call double @llvm.experimental.constrained.log2.f64(double 42.0,
760                                               metadata !"round.dynamic",
761                                               metadata !"fpexcept.strict") #0
762  ret double %result
763}
764
765; Verify that rint(42.1) isn't simplified when the rounding mode is unknown.
766define double @f15() #0 {
767; X87-LABEL: f15:
768; X87:       # %bb.0: # %entry
769; X87-NEXT:    subl $12, %esp
770; X87-NEXT:    .cfi_def_cfa_offset 16
771; X87-NEXT:    fldl {{\.?LCPI[0-9]+_[0-9]+}}
772; X87-NEXT:    fstpl (%esp)
773; X87-NEXT:    wait
774; X87-NEXT:    calll rint
775; X87-NEXT:    addl $12, %esp
776; X87-NEXT:    .cfi_def_cfa_offset 4
777; X87-NEXT:    retl
778;
779; X86-SSE-LABEL: f15:
780; X86-SSE:       # %bb.0: # %entry
781; X86-SSE-NEXT:    subl $12, %esp
782; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
783; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
784; X86-SSE-NEXT:    movsd %xmm0, (%esp)
785; X86-SSE-NEXT:    calll rint
786; X86-SSE-NEXT:    addl $12, %esp
787; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
788; X86-SSE-NEXT:    retl
789;
790; SSE-LABEL: f15:
791; SSE:       # %bb.0: # %entry
792; SSE-NEXT:    pushq %rax
793; SSE-NEXT:    .cfi_def_cfa_offset 16
794; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
795; SSE-NEXT:    callq rint@PLT
796; SSE-NEXT:    popq %rax
797; SSE-NEXT:    .cfi_def_cfa_offset 8
798; SSE-NEXT:    retq
799;
800; AVX-LABEL: f15:
801; AVX:       # %bb.0: # %entry
802; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
803; AVX-NEXT:    vroundsd $4, %xmm0, %xmm0, %xmm0
804; AVX-NEXT:    retq
805entry:
806  %result = call double @llvm.experimental.constrained.rint.f64(double 42.1,
807                                               metadata !"round.dynamic",
808                                               metadata !"fpexcept.strict") #0
809  ret double %result
810}
811
812; Verify that nearbyint(42.1) isn't simplified when the rounding mode is
813; unknown.
814define double @f16() #0 {
815; X87-LABEL: f16:
816; X87:       # %bb.0: # %entry
817; X87-NEXT:    subl $12, %esp
818; X87-NEXT:    .cfi_def_cfa_offset 16
819; X87-NEXT:    fldl {{\.?LCPI[0-9]+_[0-9]+}}
820; X87-NEXT:    fstpl (%esp)
821; X87-NEXT:    wait
822; X87-NEXT:    calll nearbyint
823; X87-NEXT:    addl $12, %esp
824; X87-NEXT:    .cfi_def_cfa_offset 4
825; X87-NEXT:    retl
826;
827; X86-SSE-LABEL: f16:
828; X86-SSE:       # %bb.0: # %entry
829; X86-SSE-NEXT:    subl $12, %esp
830; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
831; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
832; X86-SSE-NEXT:    movsd %xmm0, (%esp)
833; X86-SSE-NEXT:    calll nearbyint
834; X86-SSE-NEXT:    addl $12, %esp
835; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
836; X86-SSE-NEXT:    retl
837;
838; SSE-LABEL: f16:
839; SSE:       # %bb.0: # %entry
840; SSE-NEXT:    pushq %rax
841; SSE-NEXT:    .cfi_def_cfa_offset 16
842; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
843; SSE-NEXT:    callq nearbyint@PLT
844; SSE-NEXT:    popq %rax
845; SSE-NEXT:    .cfi_def_cfa_offset 8
846; SSE-NEXT:    retq
847;
848; AVX-LABEL: f16:
849; AVX:       # %bb.0: # %entry
850; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
851; AVX-NEXT:    vroundsd $12, %xmm0, %xmm0, %xmm0
852; AVX-NEXT:    retq
853entry:
854  %result = call double @llvm.experimental.constrained.nearbyint.f64(
855                                               double 42.1,
856                                               metadata !"round.dynamic",
857                                               metadata !"fpexcept.strict") #0
858  ret double %result
859}
860
861define double @f19() #0 {
862; X87-LABEL: f19:
863; X87:       # %bb.0: # %entry
864; X87-NEXT:    subl $28, %esp
865; X87-NEXT:    .cfi_def_cfa_offset 32
866; X87-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
867; X87-NEXT:    fstpl {{[0-9]+}}(%esp)
868; X87-NEXT:    wait
869; X87-NEXT:    movl $1072693248, {{[0-9]+}}(%esp) # imm = 0x3FF00000
870; X87-NEXT:    movl $0, (%esp)
871; X87-NEXT:    calll fmod
872; X87-NEXT:    addl $28, %esp
873; X87-NEXT:    .cfi_def_cfa_offset 4
874; X87-NEXT:    retl
875;
876; X86-SSE-LABEL: f19:
877; X86-SSE:       # %bb.0: # %entry
878; X86-SSE-NEXT:    subl $28, %esp
879; X86-SSE-NEXT:    .cfi_def_cfa_offset 32
880; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
881; X86-SSE-NEXT:    movsd %xmm0, {{[0-9]+}}(%esp)
882; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
883; X86-SSE-NEXT:    movsd %xmm0, (%esp)
884; X86-SSE-NEXT:    calll fmod
885; X86-SSE-NEXT:    addl $28, %esp
886; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
887; X86-SSE-NEXT:    retl
888;
889; SSE-LABEL: f19:
890; SSE:       # %bb.0: # %entry
891; SSE-NEXT:    pushq %rax
892; SSE-NEXT:    .cfi_def_cfa_offset 16
893; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
894; SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
895; SSE-NEXT:    callq fmod@PLT
896; SSE-NEXT:    popq %rax
897; SSE-NEXT:    .cfi_def_cfa_offset 8
898; SSE-NEXT:    retq
899;
900; AVX-LABEL: f19:
901; AVX:       # %bb.0: # %entry
902; AVX-NEXT:    pushq %rax
903; AVX-NEXT:    .cfi_def_cfa_offset 16
904; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
905; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
906; AVX-NEXT:    callq fmod@PLT
907; AVX-NEXT:    popq %rax
908; AVX-NEXT:    .cfi_def_cfa_offset 8
909; AVX-NEXT:    retq
910entry:
911  %rem = call double @llvm.experimental.constrained.frem.f64(
912                                               double 1.000000e+00,
913                                               double 1.000000e+01,
914                                               metadata !"round.dynamic",
915                                               metadata !"fpexcept.strict") #0
916  ret double %rem
917}
918
919; Verify that fptosi(%x) isn't simplified when the rounding mode is
920; unknown.
921; Verify that no gross errors happen.
922; FIXME: The SSE/AVX code does not raise an invalid exception for all values
923; that don't fit in i8.
924define i8 @f20s8(double %x) #0 {
925; X87-LABEL: f20s8:
926; X87:       # %bb.0: # %entry
927; X87-NEXT:    subl $8, %esp
928; X87-NEXT:    .cfi_def_cfa_offset 12
929; X87-NEXT:    fldl {{[0-9]+}}(%esp)
930; X87-NEXT:    wait
931; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
932; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
933; X87-NEXT:    orl $3072, %eax # imm = 0xC00
934; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
935; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
936; X87-NEXT:    fistps {{[0-9]+}}(%esp)
937; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
938; X87-NEXT:    movb {{[0-9]+}}(%esp), %al
939; X87-NEXT:    addl $8, %esp
940; X87-NEXT:    .cfi_def_cfa_offset 4
941; X87-NEXT:    retl
942;
943; X86-SSE-LABEL: f20s8:
944; X86-SSE:       # %bb.0: # %entry
945; X86-SSE-NEXT:    cvttsd2si {{[0-9]+}}(%esp), %eax
946; X86-SSE-NEXT:    # kill: def $al killed $al killed $eax
947; X86-SSE-NEXT:    retl
948;
949; SSE-LABEL: f20s8:
950; SSE:       # %bb.0: # %entry
951; SSE-NEXT:    cvttsd2si %xmm0, %eax
952; SSE-NEXT:    # kill: def $al killed $al killed $eax
953; SSE-NEXT:    retq
954;
955; AVX-LABEL: f20s8:
956; AVX:       # %bb.0: # %entry
957; AVX-NEXT:    vcvttsd2si %xmm0, %eax
958; AVX-NEXT:    # kill: def $al killed $al killed $eax
959; AVX-NEXT:    retq
960entry:
961  %result = call i8 @llvm.experimental.constrained.fptosi.i8.f64(double %x,
962                                               metadata !"fpexcept.strict") #0
963  ret i8 %result
964}
965
966; Verify that fptosi(%x) isn't simplified when the rounding mode is
967; unknown.
968; Verify that no gross errors happen.
969; FIXME: The SSE/AVX code does not raise an invalid exception for all values
970; that don't fit in i16.
971define i16 @f20s16(double %x) #0 {
972; X87-LABEL: f20s16:
973; X87:       # %bb.0: # %entry
974; X87-NEXT:    subl $8, %esp
975; X87-NEXT:    .cfi_def_cfa_offset 12
976; X87-NEXT:    fldl {{[0-9]+}}(%esp)
977; X87-NEXT:    wait
978; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
979; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
980; X87-NEXT:    orl $3072, %eax # imm = 0xC00
981; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
982; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
983; X87-NEXT:    fistps {{[0-9]+}}(%esp)
984; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
985; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
986; X87-NEXT:    addl $8, %esp
987; X87-NEXT:    .cfi_def_cfa_offset 4
988; X87-NEXT:    retl
989;
990; X86-SSE-LABEL: f20s16:
991; X86-SSE:       # %bb.0: # %entry
992; X86-SSE-NEXT:    cvttsd2si {{[0-9]+}}(%esp), %eax
993; X86-SSE-NEXT:    # kill: def $ax killed $ax killed $eax
994; X86-SSE-NEXT:    retl
995;
996; SSE-LABEL: f20s16:
997; SSE:       # %bb.0: # %entry
998; SSE-NEXT:    cvttsd2si %xmm0, %eax
999; SSE-NEXT:    # kill: def $ax killed $ax killed $eax
1000; SSE-NEXT:    retq
1001;
1002; AVX-LABEL: f20s16:
1003; AVX:       # %bb.0: # %entry
1004; AVX-NEXT:    vcvttsd2si %xmm0, %eax
1005; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
1006; AVX-NEXT:    retq
1007entry:
1008  %result = call i16 @llvm.experimental.constrained.fptosi.i16.f64(double %x,
1009                                               metadata !"fpexcept.strict") #0
1010  ret i16 %result
1011}
1012
1013; Verify that fptosi(%x) isn't simplified when the rounding mode is
1014; unknown.
1015; Verify that no gross errors happen.
1016define i32 @f20s(double %x) #0 {
1017; X87-LABEL: f20s:
1018; X87:       # %bb.0: # %entry
1019; X87-NEXT:    subl $8, %esp
1020; X87-NEXT:    .cfi_def_cfa_offset 12
1021; X87-NEXT:    fldl {{[0-9]+}}(%esp)
1022; X87-NEXT:    wait
1023; X87-NEXT:    fnstcw (%esp)
1024; X87-NEXT:    movzwl (%esp), %eax
1025; X87-NEXT:    orl $3072, %eax # imm = 0xC00
1026; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
1027; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
1028; X87-NEXT:    fistpl {{[0-9]+}}(%esp)
1029; X87-NEXT:    fldcw (%esp)
1030; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
1031; X87-NEXT:    addl $8, %esp
1032; X87-NEXT:    .cfi_def_cfa_offset 4
1033; X87-NEXT:    retl
1034;
1035; X86-SSE-LABEL: f20s:
1036; X86-SSE:       # %bb.0: # %entry
1037; X86-SSE-NEXT:    cvttsd2si {{[0-9]+}}(%esp), %eax
1038; X86-SSE-NEXT:    retl
1039;
1040; SSE-LABEL: f20s:
1041; SSE:       # %bb.0: # %entry
1042; SSE-NEXT:    cvttsd2si %xmm0, %eax
1043; SSE-NEXT:    retq
1044;
1045; AVX-LABEL: f20s:
1046; AVX:       # %bb.0: # %entry
1047; AVX-NEXT:    vcvttsd2si %xmm0, %eax
1048; AVX-NEXT:    retq
1049entry:
1050  %result = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %x,
1051                                               metadata !"fpexcept.strict") #0
1052  ret i32 %result
1053}
1054
1055; Verify that fptoui(%x) isn't simplified when the rounding mode is
1056; unknown.
1057; Verify that no gross errors happen.
1058; FIXME: This code generates spurious inexact exceptions.
1059define i64 @f20s64(double %x) #0 {
1060; X87-LABEL: f20s64:
1061; X87:       # %bb.0: # %entry
1062; X87-NEXT:    subl $20, %esp
1063; X87-NEXT:    .cfi_def_cfa_offset 24
1064; X87-NEXT:    fldl {{[0-9]+}}(%esp)
1065; X87-NEXT:    wait
1066; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
1067; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
1068; X87-NEXT:    orl $3072, %eax # imm = 0xC00
1069; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
1070; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
1071; X87-NEXT:    fistpll {{[0-9]+}}(%esp)
1072; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
1073; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
1074; X87-NEXT:    movl {{[0-9]+}}(%esp), %edx
1075; X87-NEXT:    addl $20, %esp
1076; X87-NEXT:    .cfi_def_cfa_offset 4
1077; X87-NEXT:    retl
1078;
1079; X86-SSE-LABEL: f20s64:
1080; X86-SSE:       # %bb.0: # %entry
1081; X86-SSE-NEXT:    subl $20, %esp
1082; X86-SSE-NEXT:    .cfi_def_cfa_offset 24
1083; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
1084; X86-SSE-NEXT:    movsd %xmm0, {{[0-9]+}}(%esp)
1085; X86-SSE-NEXT:    fldl {{[0-9]+}}(%esp)
1086; X86-SSE-NEXT:    wait
1087; X86-SSE-NEXT:    fnstcw {{[0-9]+}}(%esp)
1088; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
1089; X86-SSE-NEXT:    orl $3072, %eax # imm = 0xC00
1090; X86-SSE-NEXT:    movw %ax, {{[0-9]+}}(%esp)
1091; X86-SSE-NEXT:    fldcw {{[0-9]+}}(%esp)
1092; X86-SSE-NEXT:    fistpll {{[0-9]+}}(%esp)
1093; X86-SSE-NEXT:    fldcw {{[0-9]+}}(%esp)
1094; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
1095; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
1096; X86-SSE-NEXT:    addl $20, %esp
1097; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
1098; X86-SSE-NEXT:    retl
1099;
1100; SSE-LABEL: f20s64:
1101; SSE:       # %bb.0: # %entry
1102; SSE-NEXT:    cvttsd2si %xmm0, %rax
1103; SSE-NEXT:    retq
1104;
1105; AVX-LABEL: f20s64:
1106; AVX:       # %bb.0: # %entry
1107; AVX-NEXT:    vcvttsd2si %xmm0, %rax
1108; AVX-NEXT:    retq
1109entry:
1110  %result = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %x,
1111                                               metadata !"fpexcept.strict") #0
1112  ret i64 %result
1113}
1114
1115; Verify that fptoui(%x) isn't simplified when the rounding mode is
1116; unknown.
1117; Verify that no gross errors happen.
1118define i128 @f20s128(double %x) nounwind strictfp {
1119; X87-LABEL: f20s128:
1120; X87:       # %bb.0: # %entry
1121; X87-NEXT:    pushl %edi
1122; X87-NEXT:    pushl %esi
1123; X87-NEXT:    subl $36, %esp
1124; X87-NEXT:    movl {{[0-9]+}}(%esp), %esi
1125; X87-NEXT:    fldl {{[0-9]+}}(%esp)
1126; X87-NEXT:    fstpl {{[0-9]+}}(%esp)
1127; X87-NEXT:    wait
1128; X87-NEXT:    leal {{[0-9]+}}(%esp), %eax
1129; X87-NEXT:    movl %eax, (%esp)
1130; X87-NEXT:    calll __fixdfti
1131; X87-NEXT:    subl $4, %esp
1132; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
1133; X87-NEXT:    movl {{[0-9]+}}(%esp), %ecx
1134; X87-NEXT:    movl {{[0-9]+}}(%esp), %edx
1135; X87-NEXT:    movl {{[0-9]+}}(%esp), %edi
1136; X87-NEXT:    movl %edi, 8(%esi)
1137; X87-NEXT:    movl %edx, 12(%esi)
1138; X87-NEXT:    movl %eax, (%esi)
1139; X87-NEXT:    movl %ecx, 4(%esi)
1140; X87-NEXT:    movl %esi, %eax
1141; X87-NEXT:    addl $36, %esp
1142; X87-NEXT:    popl %esi
1143; X87-NEXT:    popl %edi
1144; X87-NEXT:    retl $4
1145;
1146; X86-SSE-LABEL: f20s128:
1147; X86-SSE:       # %bb.0: # %entry
1148; X86-SSE-NEXT:    pushl %edi
1149; X86-SSE-NEXT:    pushl %esi
1150; X86-SSE-NEXT:    subl $36, %esp
1151; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %esi
1152; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
1153; X86-SSE-NEXT:    movsd %xmm0, {{[0-9]+}}(%esp)
1154; X86-SSE-NEXT:    leal {{[0-9]+}}(%esp), %eax
1155; X86-SSE-NEXT:    movl %eax, (%esp)
1156; X86-SSE-NEXT:    calll __fixdfti
1157; X86-SSE-NEXT:    subl $4, %esp
1158; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
1159; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
1160; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
1161; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edi
1162; X86-SSE-NEXT:    movl %edi, 8(%esi)
1163; X86-SSE-NEXT:    movl %edx, 12(%esi)
1164; X86-SSE-NEXT:    movl %eax, (%esi)
1165; X86-SSE-NEXT:    movl %ecx, 4(%esi)
1166; X86-SSE-NEXT:    movl %esi, %eax
1167; X86-SSE-NEXT:    addl $36, %esp
1168; X86-SSE-NEXT:    popl %esi
1169; X86-SSE-NEXT:    popl %edi
1170; X86-SSE-NEXT:    retl $4
1171;
1172; SSE-LABEL: f20s128:
1173; SSE:       # %bb.0: # %entry
1174; SSE-NEXT:    pushq %rax
1175; SSE-NEXT:    callq __fixdfti@PLT
1176; SSE-NEXT:    popq %rcx
1177; SSE-NEXT:    retq
1178;
1179; AVX-LABEL: f20s128:
1180; AVX:       # %bb.0: # %entry
1181; AVX-NEXT:    pushq %rax
1182; AVX-NEXT:    callq __fixdfti@PLT
1183; AVX-NEXT:    popq %rcx
1184; AVX-NEXT:    retq
1185entry:
1186  %result = call i128 @llvm.experimental.constrained.fptosi.i128.f64(double %x,
1187                                               metadata !"fpexcept.strict") #0
1188  ret i128 %result
1189}
1190
1191; Verify that fptoui(%x) isn't simplified when the rounding mode is
1192; unknown.
1193; Verify that no gross errors happen.
1194; FIXME: The SSE/AVX code does not raise an invalid exception for all values
1195; that don't fit in i8.
1196define i8 @f20u8(double %x) #0 {
1197; X87-LABEL: f20u8:
1198; X87:       # %bb.0: # %entry
1199; X87-NEXT:    subl $8, %esp
1200; X87-NEXT:    .cfi_def_cfa_offset 12
1201; X87-NEXT:    fldl {{[0-9]+}}(%esp)
1202; X87-NEXT:    wait
1203; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
1204; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
1205; X87-NEXT:    orl $3072, %eax # imm = 0xC00
1206; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
1207; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
1208; X87-NEXT:    fistps {{[0-9]+}}(%esp)
1209; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
1210; X87-NEXT:    movb {{[0-9]+}}(%esp), %al
1211; X87-NEXT:    addl $8, %esp
1212; X87-NEXT:    .cfi_def_cfa_offset 4
1213; X87-NEXT:    retl
1214;
1215; X86-SSE-LABEL: f20u8:
1216; X86-SSE:       # %bb.0: # %entry
1217; X86-SSE-NEXT:    cvttsd2si {{[0-9]+}}(%esp), %eax
1218; X86-SSE-NEXT:    # kill: def $al killed $al killed $eax
1219; X86-SSE-NEXT:    retl
1220;
1221; SSE-LABEL: f20u8:
1222; SSE:       # %bb.0: # %entry
1223; SSE-NEXT:    cvttsd2si %xmm0, %eax
1224; SSE-NEXT:    # kill: def $al killed $al killed $eax
1225; SSE-NEXT:    retq
1226;
1227; AVX-LABEL: f20u8:
1228; AVX:       # %bb.0: # %entry
1229; AVX-NEXT:    vcvttsd2si %xmm0, %eax
1230; AVX-NEXT:    # kill: def $al killed $al killed $eax
1231; AVX-NEXT:    retq
1232entry:
1233  %result = call i8 @llvm.experimental.constrained.fptoui.i8.f64(double %x,
1234                                               metadata !"fpexcept.strict") #0
1235  ret i8 %result
1236}
1237; Verify that fptoui(%x) isn't simplified when the rounding mode is
1238; unknown.
1239; Verify that no gross errors happen.
1240; FIXME: The SSE/AVX code does not raise an invalid exception for all values
1241; that don't fit in i16.
1242define i16 @f20u16(double %x) #0 {
1243; X87-LABEL: f20u16:
1244; X87:       # %bb.0: # %entry
1245; X87-NEXT:    subl $8, %esp
1246; X87-NEXT:    .cfi_def_cfa_offset 12
1247; X87-NEXT:    fldl {{[0-9]+}}(%esp)
1248; X87-NEXT:    wait
1249; X87-NEXT:    fnstcw (%esp)
1250; X87-NEXT:    movzwl (%esp), %eax
1251; X87-NEXT:    orl $3072, %eax # imm = 0xC00
1252; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
1253; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
1254; X87-NEXT:    fistpl {{[0-9]+}}(%esp)
1255; X87-NEXT:    fldcw (%esp)
1256; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
1257; X87-NEXT:    # kill: def $ax killed $ax killed $eax
1258; X87-NEXT:    addl $8, %esp
1259; X87-NEXT:    .cfi_def_cfa_offset 4
1260; X87-NEXT:    retl
1261;
1262; X86-SSE-LABEL: f20u16:
1263; X86-SSE:       # %bb.0: # %entry
1264; X86-SSE-NEXT:    cvttsd2si {{[0-9]+}}(%esp), %eax
1265; X86-SSE-NEXT:    # kill: def $ax killed $ax killed $eax
1266; X86-SSE-NEXT:    retl
1267;
1268; SSE-LABEL: f20u16:
1269; SSE:       # %bb.0: # %entry
1270; SSE-NEXT:    cvttsd2si %xmm0, %eax
1271; SSE-NEXT:    # kill: def $ax killed $ax killed $eax
1272; SSE-NEXT:    retq
1273;
1274; AVX-LABEL: f20u16:
1275; AVX:       # %bb.0: # %entry
1276; AVX-NEXT:    vcvttsd2si %xmm0, %eax
1277; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
1278; AVX-NEXT:    retq
1279entry:
1280  %result = call i16 @llvm.experimental.constrained.fptoui.i16.f64(double %x,
1281                                               metadata !"fpexcept.strict") #0
1282  ret i16 %result
1283}
1284
1285; Verify that fptoui(%x) isn't simplified when the rounding mode is
1286; unknown.
1287; Verify that no gross errors happen.
1288; FIXME: The X87/SSE/AVX1 code does not raise an invalid exception for all
1289; values that don't fit in i32. The AVX512 code does.
1290define i32 @f20u(double %x) #0 {
1291; X87-LABEL: f20u:
1292; X87:       # %bb.0: # %entry
1293; X87-NEXT:    subl $20, %esp
1294; X87-NEXT:    .cfi_def_cfa_offset 24
1295; X87-NEXT:    fldl {{[0-9]+}}(%esp)
1296; X87-NEXT:    wait
1297; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
1298; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
1299; X87-NEXT:    orl $3072, %eax # imm = 0xC00
1300; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
1301; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
1302; X87-NEXT:    fistpll {{[0-9]+}}(%esp)
1303; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
1304; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
1305; X87-NEXT:    addl $20, %esp
1306; X87-NEXT:    .cfi_def_cfa_offset 4
1307; X87-NEXT:    retl
1308;
1309; X86-SSE-LABEL: f20u:
1310; X86-SSE:       # %bb.0: # %entry
1311; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
1312; X86-SSE-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
1313; X86-SSE-NEXT:    comisd %xmm0, %xmm2
1314; X86-SSE-NEXT:    xorpd %xmm1, %xmm1
1315; X86-SSE-NEXT:    ja .LBB24_2
1316; X86-SSE-NEXT:  # %bb.1: # %entry
1317; X86-SSE-NEXT:    movapd %xmm2, %xmm1
1318; X86-SSE-NEXT:  .LBB24_2: # %entry
1319; X86-SSE-NEXT:    setbe %al
1320; X86-SSE-NEXT:    movzbl %al, %ecx
1321; X86-SSE-NEXT:    shll $31, %ecx
1322; X86-SSE-NEXT:    subsd %xmm1, %xmm0
1323; X86-SSE-NEXT:    cvttsd2si %xmm0, %eax
1324; X86-SSE-NEXT:    xorl %ecx, %eax
1325; X86-SSE-NEXT:    retl
1326;
1327; SSE-LABEL: f20u:
1328; SSE:       # %bb.0: # %entry
1329; SSE-NEXT:    cvttsd2si %xmm0, %rax
1330; SSE-NEXT:    # kill: def $eax killed $eax killed $rax
1331; SSE-NEXT:    retq
1332;
1333; AVX1-LABEL: f20u:
1334; AVX1:       # %bb.0: # %entry
1335; AVX1-NEXT:    vcvttsd2si %xmm0, %rax
1336; AVX1-NEXT:    # kill: def $eax killed $eax killed $rax
1337; AVX1-NEXT:    retq
1338;
1339; AVX512-LABEL: f20u:
1340; AVX512:       # %bb.0: # %entry
1341; AVX512-NEXT:    vcvttsd2usi %xmm0, %eax
1342; AVX512-NEXT:    retq
1343entry:
1344  %result = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x,
1345                                               metadata !"fpexcept.strict") #0
1346  ret i32 %result
1347}
1348
1349; Verify that fptoui(%x) isn't simplified when the rounding mode is
1350; unknown.
1351; Verify that no gross errors happen.
1352; FIXME: This code generates spurious inexact exceptions.
1353define i64 @f20u64(double %x) #0 {
1354; X87-LABEL: f20u64:
1355; X87:       # %bb.0: # %entry
1356; X87-NEXT:    subl $20, %esp
1357; X87-NEXT:    .cfi_def_cfa_offset 24
1358; X87-NEXT:    fldl {{[0-9]+}}(%esp)
1359; X87-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
1360; X87-NEXT:    wait
1361; X87-NEXT:    xorl %edx, %edx
1362; X87-NEXT:    fcomi %st(1), %st
1363; X87-NEXT:    wait
1364; X87-NEXT:    setbe %dl
1365; X87-NEXT:    fldz
1366; X87-NEXT:    fcmovbe %st(1), %st
1367; X87-NEXT:    fstp %st(1)
1368; X87-NEXT:    fsubrp %st, %st(1)
1369; X87-NEXT:    wait
1370; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
1371; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
1372; X87-NEXT:    orl $3072, %eax # imm = 0xC00
1373; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
1374; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
1375; X87-NEXT:    fistpll {{[0-9]+}}(%esp)
1376; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
1377; X87-NEXT:    shll $31, %edx
1378; X87-NEXT:    xorl {{[0-9]+}}(%esp), %edx
1379; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
1380; X87-NEXT:    addl $20, %esp
1381; X87-NEXT:    .cfi_def_cfa_offset 4
1382; X87-NEXT:    retl
1383;
1384; X86-SSE-LABEL: f20u64:
1385; X86-SSE:       # %bb.0: # %entry
1386; X86-SSE-NEXT:    subl $20, %esp
1387; X86-SSE-NEXT:    .cfi_def_cfa_offset 24
1388; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
1389; X86-SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
1390; X86-SSE-NEXT:    comisd %xmm0, %xmm1
1391; X86-SSE-NEXT:    jbe .LBB25_2
1392; X86-SSE-NEXT:  # %bb.1: # %entry
1393; X86-SSE-NEXT:    xorpd %xmm1, %xmm1
1394; X86-SSE-NEXT:  .LBB25_2: # %entry
1395; X86-SSE-NEXT:    subsd %xmm1, %xmm0
1396; X86-SSE-NEXT:    movsd %xmm0, {{[0-9]+}}(%esp)
1397; X86-SSE-NEXT:    setbe %al
1398; X86-SSE-NEXT:    fldl {{[0-9]+}}(%esp)
1399; X86-SSE-NEXT:    wait
1400; X86-SSE-NEXT:    fnstcw {{[0-9]+}}(%esp)
1401; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
1402; X86-SSE-NEXT:    orl $3072, %ecx # imm = 0xC00
1403; X86-SSE-NEXT:    movw %cx, {{[0-9]+}}(%esp)
1404; X86-SSE-NEXT:    fldcw {{[0-9]+}}(%esp)
1405; X86-SSE-NEXT:    fistpll {{[0-9]+}}(%esp)
1406; X86-SSE-NEXT:    fldcw {{[0-9]+}}(%esp)
1407; X86-SSE-NEXT:    movzbl %al, %edx
1408; X86-SSE-NEXT:    shll $31, %edx
1409; X86-SSE-NEXT:    xorl {{[0-9]+}}(%esp), %edx
1410; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
1411; X86-SSE-NEXT:    addl $20, %esp
1412; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
1413; X86-SSE-NEXT:    retl
1414;
1415; SSE-LABEL: f20u64:
1416; SSE:       # %bb.0: # %entry
1417; SSE-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
1418; SSE-NEXT:    comisd %xmm2, %xmm0
1419; SSE-NEXT:    xorpd %xmm1, %xmm1
1420; SSE-NEXT:    jb .LBB25_2
1421; SSE-NEXT:  # %bb.1: # %entry
1422; SSE-NEXT:    movapd %xmm2, %xmm1
1423; SSE-NEXT:  .LBB25_2: # %entry
1424; SSE-NEXT:    subsd %xmm1, %xmm0
1425; SSE-NEXT:    cvttsd2si %xmm0, %rcx
1426; SSE-NEXT:    setae %al
1427; SSE-NEXT:    movzbl %al, %eax
1428; SSE-NEXT:    shlq $63, %rax
1429; SSE-NEXT:    xorq %rcx, %rax
1430; SSE-NEXT:    retq
1431;
1432; AVX1-LABEL: f20u64:
1433; AVX1:       # %bb.0: # %entry
1434; AVX1-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
1435; AVX1-NEXT:    vcomisd %xmm1, %xmm0
1436; AVX1-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
1437; AVX1-NEXT:    jb .LBB25_2
1438; AVX1-NEXT:  # %bb.1: # %entry
1439; AVX1-NEXT:    vmovapd %xmm1, %xmm2
1440; AVX1-NEXT:  .LBB25_2: # %entry
1441; AVX1-NEXT:    vsubsd %xmm2, %xmm0, %xmm0
1442; AVX1-NEXT:    vcvttsd2si %xmm0, %rcx
1443; AVX1-NEXT:    setae %al
1444; AVX1-NEXT:    movzbl %al, %eax
1445; AVX1-NEXT:    shlq $63, %rax
1446; AVX1-NEXT:    xorq %rcx, %rax
1447; AVX1-NEXT:    retq
1448;
1449; AVX512-LABEL: f20u64:
1450; AVX512:       # %bb.0: # %entry
1451; AVX512-NEXT:    vcvttsd2usi %xmm0, %rax
1452; AVX512-NEXT:    retq
1453entry:
1454  %result = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %x,
1455                                               metadata !"fpexcept.strict") #0
1456  ret i64 %result
1457}
1458
1459
1460; Verify that fptoui(%x) isn't simplified when the rounding mode is
1461; unknown.
1462; Verify that no gross errors happen.
1463define i128 @f20u128(double %x) nounwind strictfp {
1464; X87-LABEL: f20u128:
1465; X87:       # %bb.0: # %entry
1466; X87-NEXT:    pushl %edi
1467; X87-NEXT:    pushl %esi
1468; X87-NEXT:    subl $36, %esp
1469; X87-NEXT:    movl {{[0-9]+}}(%esp), %esi
1470; X87-NEXT:    fldl {{[0-9]+}}(%esp)
1471; X87-NEXT:    fstpl {{[0-9]+}}(%esp)
1472; X87-NEXT:    wait
1473; X87-NEXT:    leal {{[0-9]+}}(%esp), %eax
1474; X87-NEXT:    movl %eax, (%esp)
1475; X87-NEXT:    calll __fixunsdfti
1476; X87-NEXT:    subl $4, %esp
1477; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
1478; X87-NEXT:    movl {{[0-9]+}}(%esp), %ecx
1479; X87-NEXT:    movl {{[0-9]+}}(%esp), %edx
1480; X87-NEXT:    movl {{[0-9]+}}(%esp), %edi
1481; X87-NEXT:    movl %edi, 8(%esi)
1482; X87-NEXT:    movl %edx, 12(%esi)
1483; X87-NEXT:    movl %eax, (%esi)
1484; X87-NEXT:    movl %ecx, 4(%esi)
1485; X87-NEXT:    movl %esi, %eax
1486; X87-NEXT:    addl $36, %esp
1487; X87-NEXT:    popl %esi
1488; X87-NEXT:    popl %edi
1489; X87-NEXT:    retl $4
1490;
1491; X86-SSE-LABEL: f20u128:
1492; X86-SSE:       # %bb.0: # %entry
1493; X86-SSE-NEXT:    pushl %edi
1494; X86-SSE-NEXT:    pushl %esi
1495; X86-SSE-NEXT:    subl $36, %esp
1496; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %esi
1497; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
1498; X86-SSE-NEXT:    movsd %xmm0, {{[0-9]+}}(%esp)
1499; X86-SSE-NEXT:    leal {{[0-9]+}}(%esp), %eax
1500; X86-SSE-NEXT:    movl %eax, (%esp)
1501; X86-SSE-NEXT:    calll __fixunsdfti
1502; X86-SSE-NEXT:    subl $4, %esp
1503; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
1504; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
1505; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
1506; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edi
1507; X86-SSE-NEXT:    movl %edi, 8(%esi)
1508; X86-SSE-NEXT:    movl %edx, 12(%esi)
1509; X86-SSE-NEXT:    movl %eax, (%esi)
1510; X86-SSE-NEXT:    movl %ecx, 4(%esi)
1511; X86-SSE-NEXT:    movl %esi, %eax
1512; X86-SSE-NEXT:    addl $36, %esp
1513; X86-SSE-NEXT:    popl %esi
1514; X86-SSE-NEXT:    popl %edi
1515; X86-SSE-NEXT:    retl $4
1516;
1517; SSE-LABEL: f20u128:
1518; SSE:       # %bb.0: # %entry
1519; SSE-NEXT:    pushq %rax
1520; SSE-NEXT:    callq __fixunsdfti@PLT
1521; SSE-NEXT:    popq %rcx
1522; SSE-NEXT:    retq
1523;
1524; AVX-LABEL: f20u128:
1525; AVX:       # %bb.0: # %entry
1526; AVX-NEXT:    pushq %rax
1527; AVX-NEXT:    callq __fixunsdfti@PLT
1528; AVX-NEXT:    popq %rcx
1529; AVX-NEXT:    retq
1530entry:
1531  %result = call i128 @llvm.experimental.constrained.fptoui.i128.f64(double %x,
1532                                               metadata !"fpexcept.strict") #0
1533  ret i128 %result
1534}
1535
1536; Verify that round(42.1) isn't simplified when the rounding mode is
1537; unknown.
1538; Verify that no gross errors happen.
1539define float @f21() #0 {
1540; X87-LABEL: f21:
1541; X87:       # %bb.0: # %entry
1542; X87-NEXT:    pushl %eax
1543; X87-NEXT:    .cfi_def_cfa_offset 8
1544; X87-NEXT:    fldl {{\.?LCPI[0-9]+_[0-9]+}}
1545; X87-NEXT:    fstps (%esp)
1546; X87-NEXT:    flds (%esp)
1547; X87-NEXT:    wait
1548; X87-NEXT:    popl %eax
1549; X87-NEXT:    .cfi_def_cfa_offset 4
1550; X87-NEXT:    retl
1551;
1552; X86-SSE-LABEL: f21:
1553; X86-SSE:       # %bb.0: # %entry
1554; X86-SSE-NEXT:    pushl %eax
1555; X86-SSE-NEXT:    .cfi_def_cfa_offset 8
1556; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
1557; X86-SSE-NEXT:    cvtsd2ss %xmm0, %xmm0
1558; X86-SSE-NEXT:    movss %xmm0, (%esp)
1559; X86-SSE-NEXT:    flds (%esp)
1560; X86-SSE-NEXT:    wait
1561; X86-SSE-NEXT:    popl %eax
1562; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
1563; X86-SSE-NEXT:    retl
1564;
1565; SSE-LABEL: f21:
1566; SSE:       # %bb.0: # %entry
1567; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
1568; SSE-NEXT:    cvtsd2ss %xmm0, %xmm0
1569; SSE-NEXT:    retq
1570;
1571; AVX-LABEL: f21:
1572; AVX:       # %bb.0: # %entry
1573; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
1574; AVX-NEXT:    vcvtsd2ss %xmm0, %xmm0, %xmm0
1575; AVX-NEXT:    retq
1576entry:
1577  %result = call float @llvm.experimental.constrained.fptrunc.f32.f64(
1578                                               double 42.1,
1579                                               metadata !"round.dynamic",
1580                                               metadata !"fpexcept.strict") #0
1581  ret float %result
1582}
1583
1584define double @f22(float %x) #0 {
1585; X87-LABEL: f22:
1586; X87:       # %bb.0: # %entry
1587; X87-NEXT:    flds {{[0-9]+}}(%esp)
1588; X87-NEXT:    wait
1589; X87-NEXT:    retl
1590;
1591; X86-SSE-LABEL: f22:
1592; X86-SSE:       # %bb.0: # %entry
1593; X86-SSE-NEXT:    subl $12, %esp
1594; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
1595; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
1596; X86-SSE-NEXT:    cvtss2sd %xmm0, %xmm0
1597; X86-SSE-NEXT:    movsd %xmm0, (%esp)
1598; X86-SSE-NEXT:    fldl (%esp)
1599; X86-SSE-NEXT:    wait
1600; X86-SSE-NEXT:    addl $12, %esp
1601; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
1602; X86-SSE-NEXT:    retl
1603;
1604; SSE-LABEL: f22:
1605; SSE:       # %bb.0: # %entry
1606; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
1607; SSE-NEXT:    retq
1608;
1609; AVX-LABEL: f22:
1610; AVX:       # %bb.0: # %entry
1611; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
1612; AVX-NEXT:    retq
1613entry:
1614  %result = call double @llvm.experimental.constrained.fpext.f64.f32(float %x,
1615                                               metadata !"fpexcept.strict") #0
1616  ret double %result
1617}
1618
1619define i32 @f23(double %x) #0 {
1620; X87-LABEL: f23:
1621; X87:       # %bb.0: # %entry
1622; X87-NEXT:    subl $12, %esp
1623; X87-NEXT:    .cfi_def_cfa_offset 16
1624; X87-NEXT:    fldl {{[0-9]+}}(%esp)
1625; X87-NEXT:    fstpl (%esp)
1626; X87-NEXT:    wait
1627; X87-NEXT:    calll lrint
1628; X87-NEXT:    addl $12, %esp
1629; X87-NEXT:    .cfi_def_cfa_offset 4
1630; X87-NEXT:    retl
1631;
1632; X86-SSE-LABEL: f23:
1633; X86-SSE:       # %bb.0: # %entry
1634; X86-SSE-NEXT:    subl $12, %esp
1635; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
1636; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
1637; X86-SSE-NEXT:    movsd %xmm0, (%esp)
1638; X86-SSE-NEXT:    calll lrint
1639; X86-SSE-NEXT:    addl $12, %esp
1640; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
1641; X86-SSE-NEXT:    retl
1642;
1643; SSE-LABEL: f23:
1644; SSE:       # %bb.0: # %entry
1645; SSE-NEXT:    pushq %rax
1646; SSE-NEXT:    .cfi_def_cfa_offset 16
1647; SSE-NEXT:    callq lrint@PLT
1648; SSE-NEXT:    popq %rcx
1649; SSE-NEXT:    .cfi_def_cfa_offset 8
1650; SSE-NEXT:    retq
1651;
1652; AVX-LABEL: f23:
1653; AVX:       # %bb.0: # %entry
1654; AVX-NEXT:    pushq %rax
1655; AVX-NEXT:    .cfi_def_cfa_offset 16
1656; AVX-NEXT:    callq lrint@PLT
1657; AVX-NEXT:    popq %rcx
1658; AVX-NEXT:    .cfi_def_cfa_offset 8
1659; AVX-NEXT:    retq
1660entry:
1661  %result = call i32 @llvm.experimental.constrained.lrint.i32.f64(double %x,
1662                                               metadata !"round.dynamic",
1663                                               metadata !"fpexcept.strict") #0
1664  ret i32 %result
1665}
1666
1667define i32 @f24(float %x) #0 {
1668; X87-LABEL: f24:
1669; X87:       # %bb.0: # %entry
1670; X87-NEXT:    subl $12, %esp
1671; X87-NEXT:    .cfi_def_cfa_offset 16
1672; X87-NEXT:    flds {{[0-9]+}}(%esp)
1673; X87-NEXT:    fstps (%esp)
1674; X87-NEXT:    wait
1675; X87-NEXT:    calll lrintf
1676; X87-NEXT:    addl $12, %esp
1677; X87-NEXT:    .cfi_def_cfa_offset 4
1678; X87-NEXT:    retl
1679;
1680; X86-SSE-LABEL: f24:
1681; X86-SSE:       # %bb.0: # %entry
1682; X86-SSE-NEXT:    subl $12, %esp
1683; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
1684; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
1685; X86-SSE-NEXT:    movss %xmm0, (%esp)
1686; X86-SSE-NEXT:    calll lrintf
1687; X86-SSE-NEXT:    addl $12, %esp
1688; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
1689; X86-SSE-NEXT:    retl
1690;
1691; SSE-LABEL: f24:
1692; SSE:       # %bb.0: # %entry
1693; SSE-NEXT:    pushq %rax
1694; SSE-NEXT:    .cfi_def_cfa_offset 16
1695; SSE-NEXT:    callq lrintf@PLT
1696; SSE-NEXT:    popq %rcx
1697; SSE-NEXT:    .cfi_def_cfa_offset 8
1698; SSE-NEXT:    retq
1699;
1700; AVX-LABEL: f24:
1701; AVX:       # %bb.0: # %entry
1702; AVX-NEXT:    pushq %rax
1703; AVX-NEXT:    .cfi_def_cfa_offset 16
1704; AVX-NEXT:    callq lrintf@PLT
1705; AVX-NEXT:    popq %rcx
1706; AVX-NEXT:    .cfi_def_cfa_offset 8
1707; AVX-NEXT:    retq
1708entry:
1709  %result = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %x,
1710                                               metadata !"round.dynamic",
1711                                               metadata !"fpexcept.strict") #0
1712  ret i32 %result
1713}
1714
1715define i64 @f25(double %x) #0 {
1716; X87-LABEL: f25:
1717; X87:       # %bb.0: # %entry
1718; X87-NEXT:    subl $12, %esp
1719; X87-NEXT:    .cfi_def_cfa_offset 16
1720; X87-NEXT:    fldl {{[0-9]+}}(%esp)
1721; X87-NEXT:    fstpl (%esp)
1722; X87-NEXT:    wait
1723; X87-NEXT:    calll llrint
1724; X87-NEXT:    addl $12, %esp
1725; X87-NEXT:    .cfi_def_cfa_offset 4
1726; X87-NEXT:    retl
1727;
1728; X86-SSE-LABEL: f25:
1729; X86-SSE:       # %bb.0: # %entry
1730; X86-SSE-NEXT:    subl $12, %esp
1731; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
1732; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
1733; X86-SSE-NEXT:    movsd %xmm0, (%esp)
1734; X86-SSE-NEXT:    calll llrint
1735; X86-SSE-NEXT:    addl $12, %esp
1736; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
1737; X86-SSE-NEXT:    retl
1738;
1739; SSE-LABEL: f25:
1740; SSE:       # %bb.0: # %entry
1741; SSE-NEXT:    pushq %rax
1742; SSE-NEXT:    .cfi_def_cfa_offset 16
1743; SSE-NEXT:    callq llrint@PLT
1744; SSE-NEXT:    popq %rcx
1745; SSE-NEXT:    .cfi_def_cfa_offset 8
1746; SSE-NEXT:    retq
1747;
1748; AVX-LABEL: f25:
1749; AVX:       # %bb.0: # %entry
1750; AVX-NEXT:    pushq %rax
1751; AVX-NEXT:    .cfi_def_cfa_offset 16
1752; AVX-NEXT:    callq llrint@PLT
1753; AVX-NEXT:    popq %rcx
1754; AVX-NEXT:    .cfi_def_cfa_offset 8
1755; AVX-NEXT:    retq
1756entry:
1757  %result = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x,
1758                                               metadata !"round.dynamic",
1759                                               metadata !"fpexcept.strict") #0
1760  ret i64 %result
1761}
1762
1763define i64 @f26(float %x) #0 {
1764; X87-LABEL: f26:
1765; X87:       # %bb.0: # %entry
1766; X87-NEXT:    subl $12, %esp
1767; X87-NEXT:    .cfi_def_cfa_offset 16
1768; X87-NEXT:    flds {{[0-9]+}}(%esp)
1769; X87-NEXT:    fstps (%esp)
1770; X87-NEXT:    wait
1771; X87-NEXT:    calll llrintf
1772; X87-NEXT:    addl $12, %esp
1773; X87-NEXT:    .cfi_def_cfa_offset 4
1774; X87-NEXT:    retl
1775;
1776; X86-SSE-LABEL: f26:
1777; X86-SSE:       # %bb.0: # %entry
1778; X86-SSE-NEXT:    subl $12, %esp
1779; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
1780; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
1781; X86-SSE-NEXT:    movss %xmm0, (%esp)
1782; X86-SSE-NEXT:    calll llrintf
1783; X86-SSE-NEXT:    addl $12, %esp
1784; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
1785; X86-SSE-NEXT:    retl
1786;
1787; SSE-LABEL: f26:
1788; SSE:       # %bb.0: # %entry
1789; SSE-NEXT:    pushq %rax
1790; SSE-NEXT:    .cfi_def_cfa_offset 16
1791; SSE-NEXT:    callq llrintf@PLT
1792; SSE-NEXT:    popq %rcx
1793; SSE-NEXT:    .cfi_def_cfa_offset 8
1794; SSE-NEXT:    retq
1795;
1796; AVX-LABEL: f26:
1797; AVX:       # %bb.0: # %entry
1798; AVX-NEXT:    pushq %rax
1799; AVX-NEXT:    .cfi_def_cfa_offset 16
1800; AVX-NEXT:    callq llrintf@PLT
1801; AVX-NEXT:    popq %rcx
1802; AVX-NEXT:    .cfi_def_cfa_offset 8
1803; AVX-NEXT:    retq
1804entry:
1805  %result = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x,
1806                                               metadata !"round.dynamic",
1807                                               metadata !"fpexcept.strict") #0
1808  ret i64 %result
1809}
1810
1811define i32 @f27(double %x) #0 {
1812; X87-LABEL: f27:
1813; X87:       # %bb.0: # %entry
1814; X87-NEXT:    subl $12, %esp
1815; X87-NEXT:    .cfi_def_cfa_offset 16
1816; X87-NEXT:    fldl {{[0-9]+}}(%esp)
1817; X87-NEXT:    fstpl (%esp)
1818; X87-NEXT:    wait
1819; X87-NEXT:    calll lround
1820; X87-NEXT:    addl $12, %esp
1821; X87-NEXT:    .cfi_def_cfa_offset 4
1822; X87-NEXT:    retl
1823;
1824; X86-SSE-LABEL: f27:
1825; X86-SSE:       # %bb.0: # %entry
1826; X86-SSE-NEXT:    subl $12, %esp
1827; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
1828; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
1829; X86-SSE-NEXT:    movsd %xmm0, (%esp)
1830; X86-SSE-NEXT:    calll lround
1831; X86-SSE-NEXT:    addl $12, %esp
1832; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
1833; X86-SSE-NEXT:    retl
1834;
1835; SSE-LABEL: f27:
1836; SSE:       # %bb.0: # %entry
1837; SSE-NEXT:    pushq %rax
1838; SSE-NEXT:    .cfi_def_cfa_offset 16
1839; SSE-NEXT:    callq lround@PLT
1840; SSE-NEXT:    popq %rcx
1841; SSE-NEXT:    .cfi_def_cfa_offset 8
1842; SSE-NEXT:    retq
1843;
1844; AVX-LABEL: f27:
1845; AVX:       # %bb.0: # %entry
1846; AVX-NEXT:    pushq %rax
1847; AVX-NEXT:    .cfi_def_cfa_offset 16
1848; AVX-NEXT:    callq lround@PLT
1849; AVX-NEXT:    popq %rcx
1850; AVX-NEXT:    .cfi_def_cfa_offset 8
1851; AVX-NEXT:    retq
1852entry:
1853  %result = call i32 @llvm.experimental.constrained.lround.i32.f64(double %x,
1854                                               metadata !"fpexcept.strict") #0
1855  ret i32 %result
1856}
1857
1858define i32 @f28(float %x) #0 {
1859; X87-LABEL: f28:
1860; X87:       # %bb.0: # %entry
1861; X87-NEXT:    subl $12, %esp
1862; X87-NEXT:    .cfi_def_cfa_offset 16
1863; X87-NEXT:    flds {{[0-9]+}}(%esp)
1864; X87-NEXT:    fstps (%esp)
1865; X87-NEXT:    wait
1866; X87-NEXT:    calll lroundf
1867; X87-NEXT:    addl $12, %esp
1868; X87-NEXT:    .cfi_def_cfa_offset 4
1869; X87-NEXT:    retl
1870;
1871; X86-SSE-LABEL: f28:
1872; X86-SSE:       # %bb.0: # %entry
1873; X86-SSE-NEXT:    subl $12, %esp
1874; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
1875; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
1876; X86-SSE-NEXT:    movss %xmm0, (%esp)
1877; X86-SSE-NEXT:    calll lroundf
1878; X86-SSE-NEXT:    addl $12, %esp
1879; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
1880; X86-SSE-NEXT:    retl
1881;
1882; SSE-LABEL: f28:
1883; SSE:       # %bb.0: # %entry
1884; SSE-NEXT:    pushq %rax
1885; SSE-NEXT:    .cfi_def_cfa_offset 16
1886; SSE-NEXT:    callq lroundf@PLT
1887; SSE-NEXT:    popq %rcx
1888; SSE-NEXT:    .cfi_def_cfa_offset 8
1889; SSE-NEXT:    retq
1890;
1891; AVX-LABEL: f28:
1892; AVX:       # %bb.0: # %entry
1893; AVX-NEXT:    pushq %rax
1894; AVX-NEXT:    .cfi_def_cfa_offset 16
1895; AVX-NEXT:    callq lroundf@PLT
1896; AVX-NEXT:    popq %rcx
1897; AVX-NEXT:    .cfi_def_cfa_offset 8
1898; AVX-NEXT:    retq
1899entry:
1900  %result = call i32 @llvm.experimental.constrained.lround.i32.f32(float %x,
1901                                               metadata !"fpexcept.strict") #0
1902  ret i32 %result
1903}
1904
1905define i64 @f29(double %x) #0 {
1906; X87-LABEL: f29:
1907; X87:       # %bb.0: # %entry
1908; X87-NEXT:    subl $12, %esp
1909; X87-NEXT:    .cfi_def_cfa_offset 16
1910; X87-NEXT:    fldl {{[0-9]+}}(%esp)
1911; X87-NEXT:    fstpl (%esp)
1912; X87-NEXT:    wait
1913; X87-NEXT:    calll llround
1914; X87-NEXT:    addl $12, %esp
1915; X87-NEXT:    .cfi_def_cfa_offset 4
1916; X87-NEXT:    retl
1917;
1918; X86-SSE-LABEL: f29:
1919; X86-SSE:       # %bb.0: # %entry
1920; X86-SSE-NEXT:    subl $12, %esp
1921; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
1922; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
1923; X86-SSE-NEXT:    movsd %xmm0, (%esp)
1924; X86-SSE-NEXT:    calll llround
1925; X86-SSE-NEXT:    addl $12, %esp
1926; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
1927; X86-SSE-NEXT:    retl
1928;
1929; SSE-LABEL: f29:
1930; SSE:       # %bb.0: # %entry
1931; SSE-NEXT:    pushq %rax
1932; SSE-NEXT:    .cfi_def_cfa_offset 16
1933; SSE-NEXT:    callq llround@PLT
1934; SSE-NEXT:    popq %rcx
1935; SSE-NEXT:    .cfi_def_cfa_offset 8
1936; SSE-NEXT:    retq
1937;
1938; AVX-LABEL: f29:
1939; AVX:       # %bb.0: # %entry
1940; AVX-NEXT:    pushq %rax
1941; AVX-NEXT:    .cfi_def_cfa_offset 16
1942; AVX-NEXT:    callq llround@PLT
1943; AVX-NEXT:    popq %rcx
1944; AVX-NEXT:    .cfi_def_cfa_offset 8
1945; AVX-NEXT:    retq
1946entry:
1947  %result = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x,
1948                                               metadata !"fpexcept.strict") #0
1949  ret i64 %result
1950}
1951
1952define i64 @f30(float %x) #0 {
1953; X87-LABEL: f30:
1954; X87:       # %bb.0: # %entry
1955; X87-NEXT:    subl $12, %esp
1956; X87-NEXT:    .cfi_def_cfa_offset 16
1957; X87-NEXT:    flds {{[0-9]+}}(%esp)
1958; X87-NEXT:    fstps (%esp)
1959; X87-NEXT:    wait
1960; X87-NEXT:    calll llroundf
1961; X87-NEXT:    addl $12, %esp
1962; X87-NEXT:    .cfi_def_cfa_offset 4
1963; X87-NEXT:    retl
1964;
1965; X86-SSE-LABEL: f30:
1966; X86-SSE:       # %bb.0: # %entry
1967; X86-SSE-NEXT:    subl $12, %esp
1968; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
1969; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
1970; X86-SSE-NEXT:    movss %xmm0, (%esp)
1971; X86-SSE-NEXT:    calll llroundf
1972; X86-SSE-NEXT:    addl $12, %esp
1973; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
1974; X86-SSE-NEXT:    retl
1975;
1976; SSE-LABEL: f30:
1977; SSE:       # %bb.0: # %entry
1978; SSE-NEXT:    pushq %rax
1979; SSE-NEXT:    .cfi_def_cfa_offset 16
1980; SSE-NEXT:    callq llroundf@PLT
1981; SSE-NEXT:    popq %rcx
1982; SSE-NEXT:    .cfi_def_cfa_offset 8
1983; SSE-NEXT:    retq
1984;
1985; AVX-LABEL: f30:
1986; AVX:       # %bb.0: # %entry
1987; AVX-NEXT:    pushq %rax
1988; AVX-NEXT:    .cfi_def_cfa_offset 16
1989; AVX-NEXT:    callq llroundf@PLT
1990; AVX-NEXT:    popq %rcx
1991; AVX-NEXT:    .cfi_def_cfa_offset 8
1992; AVX-NEXT:    retq
1993entry:
1994  %result = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x,
1995                                               metadata !"fpexcept.strict") #0
1996  ret i64 %result
1997}
1998
1999; Verify that sitofp(%x) isn't simplified when the rounding mode is
2000; unknown.
2001; Verify that no gross errors happen.
2002define double @sifdb(i8 %x) #0 {
2003; X87-LABEL: sifdb:
2004; X87:       # %bb.0: # %entry
2005; X87-NEXT:    pushl %eax
2006; X87-NEXT:    .cfi_def_cfa_offset 8
2007; X87-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
2008; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
2009; X87-NEXT:    filds {{[0-9]+}}(%esp)
2010; X87-NEXT:    wait
2011; X87-NEXT:    popl %eax
2012; X87-NEXT:    .cfi_def_cfa_offset 4
2013; X87-NEXT:    retl
2014;
2015; X86-SSE-LABEL: sifdb:
2016; X86-SSE:       # %bb.0: # %entry
2017; X86-SSE-NEXT:    subl $12, %esp
2018; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
2019; X86-SSE-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
2020; X86-SSE-NEXT:    cvtsi2sd %eax, %xmm0
2021; X86-SSE-NEXT:    movsd %xmm0, (%esp)
2022; X86-SSE-NEXT:    fldl (%esp)
2023; X86-SSE-NEXT:    wait
2024; X86-SSE-NEXT:    addl $12, %esp
2025; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2026; X86-SSE-NEXT:    retl
2027;
2028; SSE-LABEL: sifdb:
2029; SSE:       # %bb.0: # %entry
2030; SSE-NEXT:    movsbl %dil, %eax
2031; SSE-NEXT:    cvtsi2sd %eax, %xmm0
2032; SSE-NEXT:    retq
2033;
2034; AVX-LABEL: sifdb:
2035; AVX:       # %bb.0: # %entry
2036; AVX-NEXT:    movsbl %dil, %eax
2037; AVX-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
2038; AVX-NEXT:    retq
2039entry:
2040  %result = call double @llvm.experimental.constrained.sitofp.f64.i8(i8 %x,
2041                                               metadata !"round.dynamic",
2042                                               metadata !"fpexcept.strict") #0
2043  ret double %result
2044}
2045
2046define double @sifdw(i16 %x) #0 {
2047; X87-LABEL: sifdw:
2048; X87:       # %bb.0: # %entry
2049; X87-NEXT:    pushl %eax
2050; X87-NEXT:    .cfi_def_cfa_offset 8
2051; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
2052; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
2053; X87-NEXT:    filds {{[0-9]+}}(%esp)
2054; X87-NEXT:    wait
2055; X87-NEXT:    popl %eax
2056; X87-NEXT:    .cfi_def_cfa_offset 4
2057; X87-NEXT:    retl
2058;
2059; X86-SSE-LABEL: sifdw:
2060; X86-SSE:       # %bb.0: # %entry
2061; X86-SSE-NEXT:    subl $12, %esp
2062; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
2063; X86-SSE-NEXT:    movswl {{[0-9]+}}(%esp), %eax
2064; X86-SSE-NEXT:    cvtsi2sd %eax, %xmm0
2065; X86-SSE-NEXT:    movsd %xmm0, (%esp)
2066; X86-SSE-NEXT:    fldl (%esp)
2067; X86-SSE-NEXT:    wait
2068; X86-SSE-NEXT:    addl $12, %esp
2069; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2070; X86-SSE-NEXT:    retl
2071;
2072; SSE-LABEL: sifdw:
2073; SSE:       # %bb.0: # %entry
2074; SSE-NEXT:    movswl %di, %eax
2075; SSE-NEXT:    cvtsi2sd %eax, %xmm0
2076; SSE-NEXT:    retq
2077;
2078; AVX-LABEL: sifdw:
2079; AVX:       # %bb.0: # %entry
2080; AVX-NEXT:    movswl %di, %eax
2081; AVX-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
2082; AVX-NEXT:    retq
2083entry:
2084  %result = call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %x,
2085                                               metadata !"round.dynamic",
2086                                               metadata !"fpexcept.strict") #0
2087  ret double %result
2088}
2089
2090define double @sifdi(i32 %x) #0 {
2091; X87-LABEL: sifdi:
2092; X87:       # %bb.0: # %entry
2093; X87-NEXT:    pushl %eax
2094; X87-NEXT:    .cfi_def_cfa_offset 8
2095; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
2096; X87-NEXT:    movl %eax, (%esp)
2097; X87-NEXT:    fildl (%esp)
2098; X87-NEXT:    wait
2099; X87-NEXT:    popl %eax
2100; X87-NEXT:    .cfi_def_cfa_offset 4
2101; X87-NEXT:    retl
2102;
2103; X86-SSE-LABEL: sifdi:
2104; X86-SSE:       # %bb.0: # %entry
2105; X86-SSE-NEXT:    subl $12, %esp
2106; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
2107; X86-SSE-NEXT:    cvtsi2sdl {{[0-9]+}}(%esp), %xmm0
2108; X86-SSE-NEXT:    movsd %xmm0, (%esp)
2109; X86-SSE-NEXT:    fldl (%esp)
2110; X86-SSE-NEXT:    wait
2111; X86-SSE-NEXT:    addl $12, %esp
2112; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2113; X86-SSE-NEXT:    retl
2114;
2115; SSE-LABEL: sifdi:
2116; SSE:       # %bb.0: # %entry
2117; SSE-NEXT:    cvtsi2sd %edi, %xmm0
2118; SSE-NEXT:    retq
2119;
2120; AVX-LABEL: sifdi:
2121; AVX:       # %bb.0: # %entry
2122; AVX-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0
2123; AVX-NEXT:    retq
2124entry:
2125  %result = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %x,
2126                                               metadata !"round.dynamic",
2127                                               metadata !"fpexcept.strict") #0
2128  ret double %result
2129}
2130
2131define float @siffb(i8 %x) #0 {
2132; X87-LABEL: siffb:
2133; X87:       # %bb.0: # %entry
2134; X87-NEXT:    pushl %eax
2135; X87-NEXT:    .cfi_def_cfa_offset 8
2136; X87-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
2137; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
2138; X87-NEXT:    filds {{[0-9]+}}(%esp)
2139; X87-NEXT:    wait
2140; X87-NEXT:    popl %eax
2141; X87-NEXT:    .cfi_def_cfa_offset 4
2142; X87-NEXT:    retl
2143;
2144; X86-SSE-LABEL: siffb:
2145; X86-SSE:       # %bb.0: # %entry
2146; X86-SSE-NEXT:    pushl %eax
2147; X86-SSE-NEXT:    .cfi_def_cfa_offset 8
2148; X86-SSE-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
2149; X86-SSE-NEXT:    cvtsi2ss %eax, %xmm0
2150; X86-SSE-NEXT:    movss %xmm0, (%esp)
2151; X86-SSE-NEXT:    flds (%esp)
2152; X86-SSE-NEXT:    wait
2153; X86-SSE-NEXT:    popl %eax
2154; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2155; X86-SSE-NEXT:    retl
2156;
2157; SSE-LABEL: siffb:
2158; SSE:       # %bb.0: # %entry
2159; SSE-NEXT:    movsbl %dil, %eax
2160; SSE-NEXT:    cvtsi2ss %eax, %xmm0
2161; SSE-NEXT:    retq
2162;
2163; AVX-LABEL: siffb:
2164; AVX:       # %bb.0: # %entry
2165; AVX-NEXT:    movsbl %dil, %eax
2166; AVX-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
2167; AVX-NEXT:    retq
2168entry:
2169  %result = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %x,
2170                                               metadata !"round.dynamic",
2171                                               metadata !"fpexcept.strict") #0
2172  ret float %result
2173}
2174
2175define float @siffw(i16 %x) #0 {
2176; X87-LABEL: siffw:
2177; X87:       # %bb.0: # %entry
2178; X87-NEXT:    pushl %eax
2179; X87-NEXT:    .cfi_def_cfa_offset 8
2180; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
2181; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
2182; X87-NEXT:    filds {{[0-9]+}}(%esp)
2183; X87-NEXT:    wait
2184; X87-NEXT:    popl %eax
2185; X87-NEXT:    .cfi_def_cfa_offset 4
2186; X87-NEXT:    retl
2187;
2188; X86-SSE-LABEL: siffw:
2189; X86-SSE:       # %bb.0: # %entry
2190; X86-SSE-NEXT:    pushl %eax
2191; X86-SSE-NEXT:    .cfi_def_cfa_offset 8
2192; X86-SSE-NEXT:    movswl {{[0-9]+}}(%esp), %eax
2193; X86-SSE-NEXT:    cvtsi2ss %eax, %xmm0
2194; X86-SSE-NEXT:    movss %xmm0, (%esp)
2195; X86-SSE-NEXT:    flds (%esp)
2196; X86-SSE-NEXT:    wait
2197; X86-SSE-NEXT:    popl %eax
2198; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2199; X86-SSE-NEXT:    retl
2200;
2201; SSE-LABEL: siffw:
2202; SSE:       # %bb.0: # %entry
2203; SSE-NEXT:    movswl %di, %eax
2204; SSE-NEXT:    cvtsi2ss %eax, %xmm0
2205; SSE-NEXT:    retq
2206;
2207; AVX-LABEL: siffw:
2208; AVX:       # %bb.0: # %entry
2209; AVX-NEXT:    movswl %di, %eax
2210; AVX-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
2211; AVX-NEXT:    retq
2212entry:
2213  %result = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %x,
2214                                               metadata !"round.dynamic",
2215                                               metadata !"fpexcept.strict") #0
2216  ret float %result
2217}
2218
2219define float @siffi(i32 %x) #0 {
2220; X87-LABEL: siffi:
2221; X87:       # %bb.0: # %entry
2222; X87-NEXT:    pushl %eax
2223; X87-NEXT:    .cfi_def_cfa_offset 8
2224; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
2225; X87-NEXT:    movl %eax, (%esp)
2226; X87-NEXT:    fildl (%esp)
2227; X87-NEXT:    wait
2228; X87-NEXT:    popl %eax
2229; X87-NEXT:    .cfi_def_cfa_offset 4
2230; X87-NEXT:    retl
2231;
2232; X86-SSE-LABEL: siffi:
2233; X86-SSE:       # %bb.0: # %entry
2234; X86-SSE-NEXT:    pushl %eax
2235; X86-SSE-NEXT:    .cfi_def_cfa_offset 8
2236; X86-SSE-NEXT:    cvtsi2ssl {{[0-9]+}}(%esp), %xmm0
2237; X86-SSE-NEXT:    movss %xmm0, (%esp)
2238; X86-SSE-NEXT:    flds (%esp)
2239; X86-SSE-NEXT:    wait
2240; X86-SSE-NEXT:    popl %eax
2241; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2242; X86-SSE-NEXT:    retl
2243;
2244; SSE-LABEL: siffi:
2245; SSE:       # %bb.0: # %entry
2246; SSE-NEXT:    cvtsi2ss %edi, %xmm0
2247; SSE-NEXT:    retq
2248;
2249; AVX-LABEL: siffi:
2250; AVX:       # %bb.0: # %entry
2251; AVX-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0
2252; AVX-NEXT:    retq
2253entry:
2254  %result = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %x,
2255                                               metadata !"round.dynamic",
2256                                               metadata !"fpexcept.strict") #0
2257  ret float %result
2258}
2259
2260define double @sifdl(i64 %x) #0 {
2261; X87-LABEL: sifdl:
2262; X87:       # %bb.0: # %entry
2263; X87-NEXT:    fildll {{[0-9]+}}(%esp)
2264; X87-NEXT:    wait
2265; X87-NEXT:    retl
2266;
2267; X86-SSE-LABEL: sifdl:
2268; X86-SSE:       # %bb.0: # %entry
2269; X86-SSE-NEXT:    subl $12, %esp
2270; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
2271; X86-SSE-NEXT:    fildll {{[0-9]+}}(%esp)
2272; X86-SSE-NEXT:    fstpl (%esp)
2273; X86-SSE-NEXT:    fldl (%esp)
2274; X86-SSE-NEXT:    wait
2275; X86-SSE-NEXT:    addl $12, %esp
2276; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2277; X86-SSE-NEXT:    retl
2278;
2279; SSE-LABEL: sifdl:
2280; SSE:       # %bb.0: # %entry
2281; SSE-NEXT:    cvtsi2sd %rdi, %xmm0
2282; SSE-NEXT:    retq
2283;
2284; AVX-LABEL: sifdl:
2285; AVX:       # %bb.0: # %entry
2286; AVX-NEXT:    vcvtsi2sd %rdi, %xmm0, %xmm0
2287; AVX-NEXT:    retq
2288entry:
2289  %result = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %x,
2290                                               metadata !"round.dynamic",
2291                                               metadata !"fpexcept.strict") #0
2292  ret double %result
2293}
2294
2295define float @siffl(i64 %x) #0 {
2296; X87-LABEL: siffl:
2297; X87:       # %bb.0: # %entry
2298; X87-NEXT:    fildll {{[0-9]+}}(%esp)
2299; X87-NEXT:    wait
2300; X87-NEXT:    retl
2301;
2302; X86-SSE-LABEL: siffl:
2303; X86-SSE:       # %bb.0: # %entry
2304; X86-SSE-NEXT:    pushl %eax
2305; X86-SSE-NEXT:    .cfi_def_cfa_offset 8
2306; X86-SSE-NEXT:    fildll {{[0-9]+}}(%esp)
2307; X86-SSE-NEXT:    fstps (%esp)
2308; X86-SSE-NEXT:    flds (%esp)
2309; X86-SSE-NEXT:    wait
2310; X86-SSE-NEXT:    popl %eax
2311; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2312; X86-SSE-NEXT:    retl
2313;
2314; SSE-LABEL: siffl:
2315; SSE:       # %bb.0: # %entry
2316; SSE-NEXT:    cvtsi2ss %rdi, %xmm0
2317; SSE-NEXT:    retq
2318;
2319; AVX-LABEL: siffl:
2320; AVX:       # %bb.0: # %entry
2321; AVX-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0
2322; AVX-NEXT:    retq
2323entry:
2324  %result = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %x,
2325                                               metadata !"round.dynamic",
2326                                               metadata !"fpexcept.strict") #0
2327  ret float %result
2328}
2329
2330; Verify that uitofp(%x) isn't simplified when the rounding mode is
2331; unknown.
2332; Verify that no gross errors happen.
2333define double @uifdb(i8 %x) #0 {
2334; X87-LABEL: uifdb:
2335; X87:       # %bb.0: # %entry
2336; X87-NEXT:    pushl %eax
2337; X87-NEXT:    .cfi_def_cfa_offset 8
2338; X87-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
2339; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
2340; X87-NEXT:    filds {{[0-9]+}}(%esp)
2341; X87-NEXT:    wait
2342; X87-NEXT:    popl %eax
2343; X87-NEXT:    .cfi_def_cfa_offset 4
2344; X87-NEXT:    retl
2345;
2346; X86-SSE-LABEL: uifdb:
2347; X86-SSE:       # %bb.0: # %entry
2348; X86-SSE-NEXT:    subl $12, %esp
2349; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
2350; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
2351; X86-SSE-NEXT:    cvtsi2sd %eax, %xmm0
2352; X86-SSE-NEXT:    movsd %xmm0, (%esp)
2353; X86-SSE-NEXT:    fldl (%esp)
2354; X86-SSE-NEXT:    wait
2355; X86-SSE-NEXT:    addl $12, %esp
2356; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2357; X86-SSE-NEXT:    retl
2358;
2359; SSE-LABEL: uifdb:
2360; SSE:       # %bb.0: # %entry
2361; SSE-NEXT:    movzbl %dil, %eax
2362; SSE-NEXT:    cvtsi2sd %eax, %xmm0
2363; SSE-NEXT:    retq
2364;
2365; AVX-LABEL: uifdb:
2366; AVX:       # %bb.0: # %entry
2367; AVX-NEXT:    movzbl %dil, %eax
2368; AVX-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
2369; AVX-NEXT:    retq
2370entry:
2371  %result = call double @llvm.experimental.constrained.uitofp.f64.i8(i8 %x,
2372                                               metadata !"round.dynamic",
2373                                               metadata !"fpexcept.strict") #0
2374  ret double %result
2375}
2376
2377define double @uifdw(i16 %x) #0 {
2378; X87-LABEL: uifdw:
2379; X87:       # %bb.0: # %entry
2380; X87-NEXT:    pushl %eax
2381; X87-NEXT:    .cfi_def_cfa_offset 8
2382; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
2383; X87-NEXT:    movl %eax, (%esp)
2384; X87-NEXT:    fildl (%esp)
2385; X87-NEXT:    wait
2386; X87-NEXT:    popl %eax
2387; X87-NEXT:    .cfi_def_cfa_offset 4
2388; X87-NEXT:    retl
2389;
2390; X86-SSE-LABEL: uifdw:
2391; X86-SSE:       # %bb.0: # %entry
2392; X86-SSE-NEXT:    subl $12, %esp
2393; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
2394; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
2395; X86-SSE-NEXT:    cvtsi2sd %eax, %xmm0
2396; X86-SSE-NEXT:    movsd %xmm0, (%esp)
2397; X86-SSE-NEXT:    fldl (%esp)
2398; X86-SSE-NEXT:    wait
2399; X86-SSE-NEXT:    addl $12, %esp
2400; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2401; X86-SSE-NEXT:    retl
2402;
2403; SSE-LABEL: uifdw:
2404; SSE:       # %bb.0: # %entry
2405; SSE-NEXT:    movzwl %di, %eax
2406; SSE-NEXT:    cvtsi2sd %eax, %xmm0
2407; SSE-NEXT:    retq
2408;
2409; AVX-LABEL: uifdw:
2410; AVX:       # %bb.0: # %entry
2411; AVX-NEXT:    movzwl %di, %eax
2412; AVX-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
2413; AVX-NEXT:    retq
2414entry:
2415  %result = call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %x,
2416                                               metadata !"round.dynamic",
2417                                               metadata !"fpexcept.strict") #0
2418  ret double %result
2419}
2420
2421define double @uifdi(i32 %x) #0 {
2422; X87-LABEL: uifdi:
2423; X87:       # %bb.0: # %entry
2424; X87-NEXT:    subl $12, %esp
2425; X87-NEXT:    .cfi_def_cfa_offset 16
2426; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
2427; X87-NEXT:    movl %eax, (%esp)
2428; X87-NEXT:    movl $0, {{[0-9]+}}(%esp)
2429; X87-NEXT:    fildll (%esp)
2430; X87-NEXT:    wait
2431; X87-NEXT:    addl $12, %esp
2432; X87-NEXT:    .cfi_def_cfa_offset 4
2433; X87-NEXT:    retl
2434;
2435; X86-SSE-LABEL: uifdi:
2436; X86-SSE:       # %bb.0: # %entry
2437; X86-SSE-NEXT:    subl $12, %esp
2438; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
2439; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
2440; X86-SSE-NEXT:    orpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
2441; X86-SSE-NEXT:    subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
2442; X86-SSE-NEXT:    movsd %xmm0, (%esp)
2443; X86-SSE-NEXT:    fldl (%esp)
2444; X86-SSE-NEXT:    wait
2445; X86-SSE-NEXT:    addl $12, %esp
2446; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2447; X86-SSE-NEXT:    retl
2448;
2449; SSE-LABEL: uifdi:
2450; SSE:       # %bb.0: # %entry
2451; SSE-NEXT:    movl %edi, %eax
2452; SSE-NEXT:    cvtsi2sd %rax, %xmm0
2453; SSE-NEXT:    retq
2454;
2455; AVX1-LABEL: uifdi:
2456; AVX1:       # %bb.0: # %entry
2457; AVX1-NEXT:    movl %edi, %eax
2458; AVX1-NEXT:    vcvtsi2sd %rax, %xmm0, %xmm0
2459; AVX1-NEXT:    retq
2460;
2461; AVX512-LABEL: uifdi:
2462; AVX512:       # %bb.0: # %entry
2463; AVX512-NEXT:    vcvtusi2sd %edi, %xmm0, %xmm0
2464; AVX512-NEXT:    retq
2465entry:
2466  %result = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %x,
2467                                               metadata !"round.dynamic",
2468                                               metadata !"fpexcept.strict") #0
2469  ret double %result
2470}
2471
2472define double @uifdl(i64 %x) #0 {
2473; X87-LABEL: uifdl:
2474; X87:       # %bb.0: # %entry
2475; X87-NEXT:    subl $20, %esp
2476; X87-NEXT:    .cfi_def_cfa_offset 24
2477; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
2478; X87-NEXT:    movl {{[0-9]+}}(%esp), %ecx
2479; X87-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
2480; X87-NEXT:    movl %eax, (%esp)
2481; X87-NEXT:    shrl $31, %ecx
2482; X87-NEXT:    fildll (%esp)
2483; X87-NEXT:    fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%ecx,4)
2484; X87-NEXT:    fstpl {{[0-9]+}}(%esp)
2485; X87-NEXT:    fldl {{[0-9]+}}(%esp)
2486; X87-NEXT:    wait
2487; X87-NEXT:    addl $20, %esp
2488; X87-NEXT:    .cfi_def_cfa_offset 4
2489; X87-NEXT:    retl
2490;
2491; X86-SSE-LABEL: uifdl:
2492; X86-SSE:       # %bb.0: # %entry
2493; X86-SSE-NEXT:    subl $28, %esp
2494; X86-SSE-NEXT:    .cfi_def_cfa_offset 32
2495; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
2496; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
2497; X86-SSE-NEXT:    movlps %xmm0, {{[0-9]+}}(%esp)
2498; X86-SSE-NEXT:    shrl $31, %eax
2499; X86-SSE-NEXT:    fildll {{[0-9]+}}(%esp)
2500; X86-SSE-NEXT:    fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
2501; X86-SSE-NEXT:    fstpl {{[0-9]+}}(%esp)
2502; X86-SSE-NEXT:    wait
2503; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
2504; X86-SSE-NEXT:    movsd %xmm0, (%esp)
2505; X86-SSE-NEXT:    fldl (%esp)
2506; X86-SSE-NEXT:    wait
2507; X86-SSE-NEXT:    addl $28, %esp
2508; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2509; X86-SSE-NEXT:    retl
2510;
2511; SSE-LABEL: uifdl:
2512; SSE:       # %bb.0: # %entry
2513; SSE-NEXT:    movq %rdi, %rax
2514; SSE-NEXT:    shrq %rax
2515; SSE-NEXT:    movl %edi, %ecx
2516; SSE-NEXT:    andl $1, %ecx
2517; SSE-NEXT:    orq %rax, %rcx
2518; SSE-NEXT:    testq %rdi, %rdi
2519; SSE-NEXT:    cmovnsq %rdi, %rcx
2520; SSE-NEXT:    cvtsi2sd %rcx, %xmm0
2521; SSE-NEXT:    jns .LBB48_2
2522; SSE-NEXT:  # %bb.1:
2523; SSE-NEXT:    addsd %xmm0, %xmm0
2524; SSE-NEXT:  .LBB48_2: # %entry
2525; SSE-NEXT:    retq
2526;
2527; AVX1-LABEL: uifdl:
2528; AVX1:       # %bb.0: # %entry
2529; AVX1-NEXT:    movq %rdi, %rax
2530; AVX1-NEXT:    shrq %rax
2531; AVX1-NEXT:    movl %edi, %ecx
2532; AVX1-NEXT:    andl $1, %ecx
2533; AVX1-NEXT:    orq %rax, %rcx
2534; AVX1-NEXT:    testq %rdi, %rdi
2535; AVX1-NEXT:    cmovnsq %rdi, %rcx
2536; AVX1-NEXT:    vcvtsi2sd %rcx, %xmm0, %xmm0
2537; AVX1-NEXT:    jns .LBB48_2
2538; AVX1-NEXT:  # %bb.1:
2539; AVX1-NEXT:    vaddsd %xmm0, %xmm0, %xmm0
2540; AVX1-NEXT:  .LBB48_2: # %entry
2541; AVX1-NEXT:    retq
2542;
2543; AVX512-LABEL: uifdl:
2544; AVX512:       # %bb.0: # %entry
2545; AVX512-NEXT:    vcvtusi2sd %rdi, %xmm0, %xmm0
2546; AVX512-NEXT:    retq
2547entry:
2548  %result = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %x,
2549                                               metadata !"round.dynamic",
2550                                               metadata !"fpexcept.strict") #0
2551  ret double %result
2552}
2553
2554define float @uiffb(i8 %x) #0 {
2555; X87-LABEL: uiffb:
2556; X87:       # %bb.0: # %entry
2557; X87-NEXT:    pushl %eax
2558; X87-NEXT:    .cfi_def_cfa_offset 8
2559; X87-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
2560; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
2561; X87-NEXT:    filds {{[0-9]+}}(%esp)
2562; X87-NEXT:    wait
2563; X87-NEXT:    popl %eax
2564; X87-NEXT:    .cfi_def_cfa_offset 4
2565; X87-NEXT:    retl
2566;
2567; X86-SSE-LABEL: uiffb:
2568; X86-SSE:       # %bb.0: # %entry
2569; X86-SSE-NEXT:    pushl %eax
2570; X86-SSE-NEXT:    .cfi_def_cfa_offset 8
2571; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
2572; X86-SSE-NEXT:    cvtsi2ss %eax, %xmm0
2573; X86-SSE-NEXT:    movss %xmm0, (%esp)
2574; X86-SSE-NEXT:    flds (%esp)
2575; X86-SSE-NEXT:    wait
2576; X86-SSE-NEXT:    popl %eax
2577; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2578; X86-SSE-NEXT:    retl
2579;
2580; SSE-LABEL: uiffb:
2581; SSE:       # %bb.0: # %entry
2582; SSE-NEXT:    movzbl %dil, %eax
2583; SSE-NEXT:    cvtsi2ss %eax, %xmm0
2584; SSE-NEXT:    retq
2585;
2586; AVX-LABEL: uiffb:
2587; AVX:       # %bb.0: # %entry
2588; AVX-NEXT:    movzbl %dil, %eax
2589; AVX-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
2590; AVX-NEXT:    retq
2591entry:
2592  %result = call float @llvm.experimental.constrained.uitofp.f32.i8(i8 %x,
2593                                               metadata !"round.dynamic",
2594                                               metadata !"fpexcept.strict") #0
2595  ret float %result
2596}
2597
2598define float @uiffw(i16 %x) #0 {
2599; X87-LABEL: uiffw:
2600; X87:       # %bb.0: # %entry
2601; X87-NEXT:    pushl %eax
2602; X87-NEXT:    .cfi_def_cfa_offset 8
2603; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
2604; X87-NEXT:    movl %eax, (%esp)
2605; X87-NEXT:    fildl (%esp)
2606; X87-NEXT:    wait
2607; X87-NEXT:    popl %eax
2608; X87-NEXT:    .cfi_def_cfa_offset 4
2609; X87-NEXT:    retl
2610;
2611; X86-SSE-LABEL: uiffw:
2612; X86-SSE:       # %bb.0: # %entry
2613; X86-SSE-NEXT:    pushl %eax
2614; X86-SSE-NEXT:    .cfi_def_cfa_offset 8
2615; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
2616; X86-SSE-NEXT:    cvtsi2ss %eax, %xmm0
2617; X86-SSE-NEXT:    movss %xmm0, (%esp)
2618; X86-SSE-NEXT:    flds (%esp)
2619; X86-SSE-NEXT:    wait
2620; X86-SSE-NEXT:    popl %eax
2621; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2622; X86-SSE-NEXT:    retl
2623;
2624; SSE-LABEL: uiffw:
2625; SSE:       # %bb.0: # %entry
2626; SSE-NEXT:    movzwl %di, %eax
2627; SSE-NEXT:    cvtsi2ss %eax, %xmm0
2628; SSE-NEXT:    retq
2629;
2630; AVX-LABEL: uiffw:
2631; AVX:       # %bb.0: # %entry
2632; AVX-NEXT:    movzwl %di, %eax
2633; AVX-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
2634; AVX-NEXT:    retq
2635entry:
2636  %result = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %x,
2637                                               metadata !"round.dynamic",
2638                                               metadata !"fpexcept.strict") #0
2639  ret float %result
2640}
2641
2642define float @uiffi(i32 %x) #0 {
2643; X87-LABEL: uiffi:
2644; X87:       # %bb.0: # %entry
2645; X87-NEXT:    subl $12, %esp
2646; X87-NEXT:    .cfi_def_cfa_offset 16
2647; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
2648; X87-NEXT:    movl %eax, (%esp)
2649; X87-NEXT:    movl $0, {{[0-9]+}}(%esp)
2650; X87-NEXT:    fildll (%esp)
2651; X87-NEXT:    wait
2652; X87-NEXT:    addl $12, %esp
2653; X87-NEXT:    .cfi_def_cfa_offset 4
2654; X87-NEXT:    retl
2655;
2656; X86-SSE-LABEL: uiffi:
2657; X86-SSE:       # %bb.0: # %entry
2658; X86-SSE-NEXT:    pushl %eax
2659; X86-SSE-NEXT:    .cfi_def_cfa_offset 8
2660; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
2661; X86-SSE-NEXT:    orpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
2662; X86-SSE-NEXT:    subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
2663; X86-SSE-NEXT:    cvtsd2ss %xmm0, %xmm0
2664; X86-SSE-NEXT:    movss %xmm0, (%esp)
2665; X86-SSE-NEXT:    flds (%esp)
2666; X86-SSE-NEXT:    wait
2667; X86-SSE-NEXT:    popl %eax
2668; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2669; X86-SSE-NEXT:    retl
2670;
2671; SSE-LABEL: uiffi:
2672; SSE:       # %bb.0: # %entry
2673; SSE-NEXT:    movl %edi, %eax
2674; SSE-NEXT:    cvtsi2ss %rax, %xmm0
2675; SSE-NEXT:    retq
2676;
2677; AVX1-LABEL: uiffi:
2678; AVX1:       # %bb.0: # %entry
2679; AVX1-NEXT:    movl %edi, %eax
2680; AVX1-NEXT:    vcvtsi2ss %rax, %xmm0, %xmm0
2681; AVX1-NEXT:    retq
2682;
2683; AVX512-LABEL: uiffi:
2684; AVX512:       # %bb.0: # %entry
2685; AVX512-NEXT:    vcvtusi2ss %edi, %xmm0, %xmm0
2686; AVX512-NEXT:    retq
2687entry:
2688  %result = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %x,
2689                                               metadata !"round.dynamic",
2690                                               metadata !"fpexcept.strict") #0
2691  ret float %result
2692}
2693
2694define float @uiffl(i64 %x) #0 {
2695; X87-LABEL: uiffl:
2696; X87:       # %bb.0: # %entry
2697; X87-NEXT:    subl $20, %esp
2698; X87-NEXT:    .cfi_def_cfa_offset 24
2699; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
2700; X87-NEXT:    movl {{[0-9]+}}(%esp), %ecx
2701; X87-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
2702; X87-NEXT:    movl %eax, {{[0-9]+}}(%esp)
2703; X87-NEXT:    shrl $31, %ecx
2704; X87-NEXT:    fildll {{[0-9]+}}(%esp)
2705; X87-NEXT:    fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%ecx,4)
2706; X87-NEXT:    fstps {{[0-9]+}}(%esp)
2707; X87-NEXT:    flds {{[0-9]+}}(%esp)
2708; X87-NEXT:    wait
2709; X87-NEXT:    addl $20, %esp
2710; X87-NEXT:    .cfi_def_cfa_offset 4
2711; X87-NEXT:    retl
2712;
2713; X86-SSE-LABEL: uiffl:
2714; X86-SSE:       # %bb.0: # %entry
2715; X86-SSE-NEXT:    subl $20, %esp
2716; X86-SSE-NEXT:    .cfi_def_cfa_offset 24
2717; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
2718; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
2719; X86-SSE-NEXT:    movlps %xmm0, {{[0-9]+}}(%esp)
2720; X86-SSE-NEXT:    shrl $31, %eax
2721; X86-SSE-NEXT:    fildll {{[0-9]+}}(%esp)
2722; X86-SSE-NEXT:    fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
2723; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
2724; X86-SSE-NEXT:    wait
2725; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
2726; X86-SSE-NEXT:    movss %xmm0, (%esp)
2727; X86-SSE-NEXT:    flds (%esp)
2728; X86-SSE-NEXT:    wait
2729; X86-SSE-NEXT:    addl $20, %esp
2730; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
2731; X86-SSE-NEXT:    retl
2732;
2733; SSE-LABEL: uiffl:
2734; SSE:       # %bb.0: # %entry
2735; SSE-NEXT:    movq %rdi, %rax
2736; SSE-NEXT:    shrq %rax
2737; SSE-NEXT:    movl %edi, %ecx
2738; SSE-NEXT:    andl $1, %ecx
2739; SSE-NEXT:    orq %rax, %rcx
2740; SSE-NEXT:    testq %rdi, %rdi
2741; SSE-NEXT:    cmovnsq %rdi, %rcx
2742; SSE-NEXT:    cvtsi2ss %rcx, %xmm0
2743; SSE-NEXT:    jns .LBB52_2
2744; SSE-NEXT:  # %bb.1:
2745; SSE-NEXT:    addss %xmm0, %xmm0
2746; SSE-NEXT:  .LBB52_2: # %entry
2747; SSE-NEXT:    retq
2748;
2749; AVX1-LABEL: uiffl:
2750; AVX1:       # %bb.0: # %entry
2751; AVX1-NEXT:    movq %rdi, %rax
2752; AVX1-NEXT:    shrq %rax
2753; AVX1-NEXT:    movl %edi, %ecx
2754; AVX1-NEXT:    andl $1, %ecx
2755; AVX1-NEXT:    orq %rax, %rcx
2756; AVX1-NEXT:    testq %rdi, %rdi
2757; AVX1-NEXT:    cmovnsq %rdi, %rcx
2758; AVX1-NEXT:    vcvtsi2ss %rcx, %xmm0, %xmm0
2759; AVX1-NEXT:    jns .LBB52_2
2760; AVX1-NEXT:  # %bb.1:
2761; AVX1-NEXT:    vaddss %xmm0, %xmm0, %xmm0
2762; AVX1-NEXT:  .LBB52_2: # %entry
2763; AVX1-NEXT:    retq
2764;
2765; AVX512-LABEL: uiffl:
2766; AVX512:       # %bb.0: # %entry
2767; AVX512-NEXT:    vcvtusi2ss %rdi, %xmm0, %xmm0
2768; AVX512-NEXT:    retq
2769entry:
2770  %result = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %x,
2771                                               metadata !"round.dynamic",
2772                                               metadata !"fpexcept.strict") #0
2773  ret float %result
2774}
2775
2776attributes #0 = { strictfp }
2777
2778@llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
2779declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
2780declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)
2781declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
2782declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
2783declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
2784declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
2785declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
2786declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
2787declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
2788declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
2789declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata)
2790declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
2791declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
2792declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
2793declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
2794declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
2795declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
2796declare i8  @llvm.experimental.constrained.fptosi.i8.f64(double, metadata)
2797declare i16 @llvm.experimental.constrained.fptosi.i16.f64(double, metadata)
2798declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
2799declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
2800declare i128 @llvm.experimental.constrained.fptosi.i128.f64(double, metadata)
2801declare i8  @llvm.experimental.constrained.fptoui.i8.f64(double, metadata)
2802declare i16 @llvm.experimental.constrained.fptoui.i16.f64(double, metadata)
2803declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
2804declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
2805declare i128 @llvm.experimental.constrained.fptoui.i128.f64(double, metadata)
2806declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
2807declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
2808declare i32 @llvm.experimental.constrained.lrint.i32.f64(double, metadata, metadata)
2809declare i32 @llvm.experimental.constrained.lrint.i32.f32(float, metadata, metadata)
2810declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
2811declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
2812declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata)
2813declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata)
2814declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
2815declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
2816declare double @llvm.experimental.constrained.sitofp.f64.i8(i8, metadata, metadata)
2817declare double @llvm.experimental.constrained.sitofp.f64.i16(i16, metadata, metadata)
2818declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
2819declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
2820declare float @llvm.experimental.constrained.sitofp.f32.i8(i8, metadata, metadata)
2821declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metadata)
2822declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
2823declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
2824declare double @llvm.experimental.constrained.uitofp.f64.i8(i8, metadata, metadata)
2825declare double @llvm.experimental.constrained.uitofp.f64.i16(i16, metadata, metadata)
2826declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
2827declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
2828declare float @llvm.experimental.constrained.uitofp.f32.i8(i8, metadata, metadata)
2829declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metadata)
2830declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
2831declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
2832