; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+sse | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+sse | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -O2 -mtriple=i686-linux-gnu -mattr=+mmx | FileCheck %s --check-prefix=X32
; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+avx | FileCheck %s --check-prefix=X64-AVX
; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+avx | FileCheck %s --check-prefix=X64-AVX
; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+avx512f | FileCheck %s --check-prefix=X64-AVX
; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefix=X64-AVX

; Check soft floating-point conversion function calls.
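;
; For orientation, a hypothetical C sketch (not part of the original test;
; the function name is illustrative) of code that lowers to the compiler-rt
; libcalls checked below when fp128 has no hardware support:
;
;   __float128 vf128;
;   float vf32;
;   int vi32;
;   void TestConversions(void) {
;     vf128 = vf32;        /* fpext   -> __extendsftf2 */
;     vf32 = (float)vf128; /* fptrunc -> __trunctfsf2  */
;     vi32 = (int)vf128;   /* fptosi  -> __fixtfsi     */
;     vf128 = vi32;        /* sitofp  -> __floatsitf   */
;   }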

@vi16 = common dso_local global i16 0, align 2
@vi32 = common dso_local global i32 0, align 4
@vi64 = common dso_local global i64 0, align 8
@vi128 = common dso_local global i128 0, align 16
@vu32 = common dso_local global i32 0, align 4
@vu64 = common dso_local global i64 0, align 8
@vu128 = common dso_local global i128 0, align 16
@vf32 = common dso_local global float 0.000000e+00, align 4
@vf64 = common dso_local global double 0.000000e+00, align 8
@vf80 = common dso_local global x86_fp80 0xK00000000000000000000, align 8
@vf128 = common dso_local global fp128 0xL00000000000000000000000000000000, align 16

define dso_local void @TestFPExtF32_F128() nounwind {
; X64-SSE-LABEL: TestFPExtF32_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-SSE-NEXT:    callq __extendsftf2@PLT
; X64-SSE-NEXT:    movaps %xmm0, vf128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestFPExtF32_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $24, %esp
; X32-NEXT:    flds vf32
; X32-NEXT:    fstps {{[0-9]+}}(%esp)
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl %eax, (%esp)
; X32-NEXT:    calll __extendsftf2
; X32-NEXT:    subl $4, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestFPExtF32_F128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-AVX-NEXT:    callq __extendsftf2@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load float, float* @vf32, align 4
  %conv = fpext float %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}

define dso_local void @TestFPExtF64_F128() nounwind {
; X64-SSE-LABEL: TestFPExtF64_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X64-SSE-NEXT:    callq __extenddftf2@PLT
; X64-SSE-NEXT:    movaps %xmm0, vf128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestFPExtF64_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $40, %esp
; X32-NEXT:    fldl vf64
; X32-NEXT:    fstpl {{[0-9]+}}(%esp)
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl %eax, (%esp)
; X32-NEXT:    calll __extenddftf2
; X32-NEXT:    subl $4, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $40, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestFPExtF64_F128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-AVX-NEXT:    callq __extenddftf2@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load double, double* @vf64, align 8
  %conv = fpext double %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}

define dso_local void @TestFPExtF80_F128() nounwind {
; X64-SSE-LABEL: TestFPExtF80_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    subq $24, %rsp
; X64-SSE-NEXT:    fldt vf80(%rip)
; X64-SSE-NEXT:    fstpt (%rsp)
; X64-SSE-NEXT:    callq __extendxftf2@PLT
; X64-SSE-NEXT:    movaps %xmm0, vf128(%rip)
; X64-SSE-NEXT:    addq $24, %rsp
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestFPExtF80_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $40, %esp
; X32-NEXT:    fldt vf80
; X32-NEXT:    fstpt {{[0-9]+}}(%esp)
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl %eax, (%esp)
; X32-NEXT:    calll __extendxftf2
; X32-NEXT:    subl $4, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $40, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestFPExtF80_F128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    subq $24, %rsp
; X64-AVX-NEXT:    fldt vf80(%rip)
; X64-AVX-NEXT:    fstpt (%rsp)
; X64-AVX-NEXT:    callq __extendxftf2@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX-NEXT:    addq $24, %rsp
; X64-AVX-NEXT:    retq
entry:
  %0 = load x86_fp80, x86_fp80* @vf80, align 8
  %conv = fpext x86_fp80 %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}

define dso_local void @TestFPToSIF128_I16() nounwind {
; X64-SSE-LABEL: TestFPToSIF128_I16:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __fixtfsi@PLT
; X64-SSE-NEXT:    movw %ax, vi16(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestFPToSIF128_I16:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __fixtfsi
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    movw %ax, vi16
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestFPToSIF128_I16:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX-NEXT:    callq __fixtfsi@PLT
; X64-AVX-NEXT:    movw %ax, vi16(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptosi fp128 %0 to i16
  store i16 %conv, i16* @vi16, align 2
  ret void
}

define dso_local void @TestFPToUIF128_I16() nounwind {
; X64-SSE-LABEL: TestFPToUIF128_I16:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __fixtfsi@PLT
; X64-SSE-NEXT:    movw %ax, vi16(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestFPToUIF128_I16:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __fixunstfsi
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    movw %ax, vi16
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestFPToUIF128_I16:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX-NEXT:    callq __fixtfsi@PLT
; X64-AVX-NEXT:    movw %ax, vi16(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptoui fp128 %0 to i16
  store i16 %conv, i16* @vi16, align 2
  ret void
}

define dso_local void @TestFPToSIF128_I32() nounwind {
; X64-SSE-LABEL: TestFPToSIF128_I32:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __fixtfsi@PLT
; X64-SSE-NEXT:    movl %eax, vi32(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestFPToSIF128_I32:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __fixtfsi
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    movl %eax, vi32
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestFPToSIF128_I32:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX-NEXT:    callq __fixtfsi@PLT
; X64-AVX-NEXT:    movl %eax, vi32(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptosi fp128 %0 to i32
  store i32 %conv, i32* @vi32, align 4
  ret void
}

define dso_local void @TestFPToUIF128_U32() nounwind {
; X64-SSE-LABEL: TestFPToUIF128_U32:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __fixunstfsi@PLT
; X64-SSE-NEXT:    movl %eax, vu32(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestFPToUIF128_U32:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __fixunstfsi
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    movl %eax, vu32
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestFPToUIF128_U32:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX-NEXT:    callq __fixunstfsi@PLT
; X64-AVX-NEXT:    movl %eax, vu32(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptoui fp128 %0 to i32
  store i32 %conv, i32* @vu32, align 4
  ret void
}

define dso_local void @TestFPToSIF128_I64() nounwind {
; X64-SSE-LABEL: TestFPToSIF128_I64:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __fixtfsi@PLT
; X64-SSE-NEXT:    cltq
; X64-SSE-NEXT:    movq %rax, vi64(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestFPToSIF128_I64:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __fixtfsi
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    movl %eax, vi64
; X32-NEXT:    sarl $31, %eax
; X32-NEXT:    movl %eax, vi64+4
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestFPToSIF128_I64:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX-NEXT:    callq __fixtfsi@PLT
; X64-AVX-NEXT:    cltq
; X64-AVX-NEXT:    movq %rax, vi64(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptosi fp128 %0 to i32
  %conv1 = sext i32 %conv to i64
  store i64 %conv1, i64* @vi64, align 8
  ret void
}

define dso_local void @TestFPToUIF128_U64() nounwind {
; X64-SSE-LABEL: TestFPToUIF128_U64:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __fixunstfsi@PLT
; X64-SSE-NEXT:    movl %eax, %eax
; X64-SSE-NEXT:    movq %rax, vu64(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestFPToUIF128_U64:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __fixunstfsi
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    movl %eax, vu64
; X32-NEXT:    movl $0, vu64+4
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestFPToUIF128_U64:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX-NEXT:    callq __fixunstfsi@PLT
; X64-AVX-NEXT:    movl %eax, %eax
; X64-AVX-NEXT:    movq %rax, vu64(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptoui fp128 %0 to i32
  %conv1 = zext i32 %conv to i64
  store i64 %conv1, i64* @vu64, align 8
  ret void
}

define dso_local void @TestFPToSIF128_I128() nounwind {
; X64-SSE-LABEL: TestFPToSIF128_I128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __fixtfti@PLT
; X64-SSE-NEXT:    movq %rdx, vi128+8(%rip)
; X64-SSE-NEXT:    movq %rax, vi128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestFPToSIF128_I128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $36, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    pushl %eax
; X32-NEXT:    calll __fixtfti
; X32-NEXT:    addl $28, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vi128+12
; X32-NEXT:    movl %edx, vi128+8
; X32-NEXT:    movl %ecx, vi128+4
; X32-NEXT:    movl %eax, vi128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestFPToSIF128_I128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX-NEXT:    callq __fixtfti@PLT
; X64-AVX-NEXT:    movq %rdx, vi128+8(%rip)
; X64-AVX-NEXT:    movq %rax, vi128(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptosi fp128 %0 to i128
  store i128 %conv, i128* @vi128, align 16
  ret void
}

define dso_local void @TestFPToUIF128_U128() nounwind {
; X64-SSE-LABEL: TestFPToUIF128_U128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __fixunstfti@PLT
; X64-SSE-NEXT:    movq %rdx, vu128+8(%rip)
; X64-SSE-NEXT:    movq %rax, vu128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestFPToUIF128_U128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $36, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    pushl %eax
; X32-NEXT:    calll __fixunstfti
; X32-NEXT:    addl $28, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vu128+12
; X32-NEXT:    movl %edx, vu128+8
; X32-NEXT:    movl %ecx, vu128+4
; X32-NEXT:    movl %eax, vu128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestFPToUIF128_U128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX-NEXT:    callq __fixunstfti@PLT
; X64-AVX-NEXT:    movq %rdx, vu128+8(%rip)
; X64-AVX-NEXT:    movq %rax, vu128(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptoui fp128 %0 to i128
  store i128 %conv, i128* @vu128, align 16
  ret void
}

define dso_local void @TestFPTruncF128_F32() nounwind {
; X64-SSE-LABEL: TestFPTruncF128_F32:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __trunctfsf2@PLT
; X64-SSE-NEXT:    movss %xmm0, vf32(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestFPTruncF128_F32:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __trunctfsf2
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    fstps vf32
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestFPTruncF128_F32:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX-NEXT:    callq __trunctfsf2@PLT
; X64-AVX-NEXT:    vmovss %xmm0, vf32(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptrunc fp128 %0 to float
  store float %conv, float* @vf32, align 4
  ret void
}

define dso_local void @TestFPTruncF128_F64() nounwind {
; X64-SSE-LABEL: TestFPTruncF128_F64:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __trunctfdf2@PLT
; X64-SSE-NEXT:    movsd %xmm0, vf64(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestFPTruncF128_F64:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __trunctfdf2
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    fstpl vf64
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestFPTruncF128_F64:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX-NEXT:    callq __trunctfdf2@PLT
; X64-AVX-NEXT:    vmovsd %xmm0, vf64(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptrunc fp128 %0 to double
  store double %conv, double* @vf64, align 8
  ret void
}

define dso_local void @TestFPTruncF128_F80() nounwind {
; X64-SSE-LABEL: TestFPTruncF128_F80:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __trunctfxf2@PLT
; X64-SSE-NEXT:    fstpt vf80(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestFPTruncF128_F80:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl vf128+12
; X32-NEXT:    pushl vf128+8
; X32-NEXT:    pushl vf128+4
; X32-NEXT:    pushl vf128
; X32-NEXT:    calll __trunctfxf2
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    fstpt vf80
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestFPTruncF128_F80:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX-NEXT:    callq __trunctfxf2@PLT
; X64-AVX-NEXT:    fstpt vf80(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptrunc fp128 %0 to x86_fp80
  store x86_fp80 %conv, x86_fp80* @vf80, align 8
  ret void
}

define dso_local void @TestSIToFPI16_F128() nounwind {
; X64-SSE-LABEL: TestSIToFPI16_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movswl vi16(%rip), %edi
; X64-SSE-NEXT:    callq __floatsitf@PLT
; X64-SSE-NEXT:    movaps %xmm0, vf128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestSIToFPI16_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $24, %esp
; X32-NEXT:    movswl vi16, %eax
; X32-NEXT:    subl $8, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    pushl %eax
; X32-NEXT:    pushl %ecx
; X32-NEXT:    calll __floatsitf
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestSIToFPI16_F128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    movswl vi16(%rip), %edi
; X64-AVX-NEXT:    callq __floatsitf@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load i16, i16* @vi16, align 4
  %conv = sitofp i16 %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}

define dso_local void @TestSIToFPU16_F128() nounwind {
; X64-SSE-LABEL: TestSIToFPU16_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movzwl vi16(%rip), %edi
; X64-SSE-NEXT:    callq __floatsitf@PLT
; X64-SSE-NEXT:    movaps %xmm0, vf128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestSIToFPU16_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $24, %esp
; X32-NEXT:    movzwl vi16, %eax
; X32-NEXT:    subl $8, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    pushl %eax
; X32-NEXT:    pushl %ecx
; X32-NEXT:    calll __floatunsitf
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestSIToFPU16_F128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    movzwl vi16(%rip), %edi
; X64-AVX-NEXT:    callq __floatsitf@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load i16, i16* @vi16, align 4
  %conv = uitofp i16 %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}

define dso_local void @TestSIToFPI32_F128() nounwind {
; X64-SSE-LABEL: TestSIToFPI32_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movl vi32(%rip), %edi
; X64-SSE-NEXT:    callq __floatsitf@PLT
; X64-SSE-NEXT:    movaps %xmm0, vf128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestSIToFPI32_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $32, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pushl vi32
; X32-NEXT:    pushl %eax
; X32-NEXT:    calll __floatsitf
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestSIToFPI32_F128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    movl vi32(%rip), %edi
; X64-AVX-NEXT:    callq __floatsitf@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load i32, i32* @vi32, align 4
  %conv = sitofp i32 %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}

define dso_local void @TestUIToFPU32_F128() #2 {
; X64-SSE-LABEL: TestUIToFPU32_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movl vu32(%rip), %edi
; X64-SSE-NEXT:    callq __floatunsitf@PLT
; X64-SSE-NEXT:    movaps %xmm0, vf128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestUIToFPU32_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $32, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pushl vu32
; X32-NEXT:    pushl %eax
; X32-NEXT:    calll __floatunsitf
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestUIToFPU32_F128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    movl vu32(%rip), %edi
; X64-AVX-NEXT:    callq __floatunsitf@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load i32, i32* @vu32, align 4
  %conv = uitofp i32 %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}

define dso_local void @TestSIToFPI64_F128() nounwind {
; X64-SSE-LABEL: TestSIToFPI64_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movq vi64(%rip), %rdi
; X64-SSE-NEXT:    callq __floatditf@PLT
; X64-SSE-NEXT:    movaps %xmm0, vf128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestSIToFPI64_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $28, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pushl vi64+4
; X32-NEXT:    pushl vi64
; X32-NEXT:    pushl %eax
; X32-NEXT:    calll __floatditf
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestSIToFPI64_F128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    movq vi64(%rip), %rdi
; X64-AVX-NEXT:    callq __floatditf@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load i64, i64* @vi64, align 8
  %conv = sitofp i64 %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}

define dso_local void @TestUIToFPU64_F128() #2 {
; X64-SSE-LABEL: TestUIToFPU64_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movq vu64(%rip), %rdi
; X64-SSE-NEXT:    callq __floatunditf@PLT
; X64-SSE-NEXT:    movaps %xmm0, vf128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestUIToFPU64_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $28, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pushl vu64+4
; X32-NEXT:    pushl vu64
; X32-NEXT:    pushl %eax
; X32-NEXT:    calll __floatunditf
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestUIToFPU64_F128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    movq vu64(%rip), %rdi
; X64-AVX-NEXT:    callq __floatunditf@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load i64, i64* @vu64, align 8
  %conv = uitofp i64 %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}

define dso_local void @TestSIToFPI128_F128() nounwind {
; X64-SSE-LABEL: TestSIToFPI128_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movq vi128(%rip), %rdi
; X64-SSE-NEXT:    movq vi128+8(%rip), %rsi
; X64-SSE-NEXT:    callq __floattitf@PLT
; X64-SSE-NEXT:    movaps %xmm0, vf128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestSIToFPI128_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $36, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pushl vi128+12
; X32-NEXT:    pushl vi128+8
; X32-NEXT:    pushl vi128+4
; X32-NEXT:    pushl vi128
; X32-NEXT:    pushl %eax
; X32-NEXT:    calll __floattitf
; X32-NEXT:    addl $28, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestSIToFPI128_F128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    movq vi128(%rip), %rdi
; X64-AVX-NEXT:    movq vi128+8(%rip), %rsi
; X64-AVX-NEXT:    callq __floattitf@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load i128, i128* @vi128, align 16
  %conv = sitofp i128 %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}

define dso_local void @TestUIToFPU128_F128() #2 {
; X64-SSE-LABEL: TestUIToFPU128_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movq vu128(%rip), %rdi
; X64-SSE-NEXT:    movq vu128+8(%rip), %rsi
; X64-SSE-NEXT:    callq __floatuntitf@PLT
; X64-SSE-NEXT:    movaps %xmm0, vf128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestUIToFPU128_F128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $36, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pushl vu128+12
; X32-NEXT:    pushl vu128+8
; X32-NEXT:    pushl vu128+4
; X32-NEXT:    pushl vu128
; X32-NEXT:    pushl %eax
; X32-NEXT:    calll __floatuntitf
; X32-NEXT:    addl $28, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %esi, vf128+12
; X32-NEXT:    movl %edx, vf128+8
; X32-NEXT:    movl %ecx, vf128+4
; X32-NEXT:    movl %eax, vf128
; X32-NEXT:    addl $24, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestUIToFPU128_F128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    movq vu128(%rip), %rdi
; X64-AVX-NEXT:    movq vu128+8(%rip), %rsi
; X64-AVX-NEXT:    callq __floatuntitf@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
entry:
  %0 = load i128, i128* @vu128, align 16
  %conv = uitofp i128 %0 to fp128
  store fp128 %conv, fp128* @vf128, align 16
  ret void
}

define dso_local i32 @TestConst128(fp128 %v) nounwind {
; X64-SSE-LABEL: TestConst128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE-NEXT:    callq __gttf2@PLT
; X64-SSE-NEXT:    xorl %ecx, %ecx
; X64-SSE-NEXT:    testl %eax, %eax
; X64-SSE-NEXT:    setg %cl
; X64-SSE-NEXT:    movl %ecx, %eax
; X64-SSE-NEXT:    popq %rcx
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestConst128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl $1073676288 # imm = 0x3FFF0000
; X32-NEXT:    pushl $0
; X32-NEXT:    pushl $0
; X32-NEXT:    pushl $0
; X32-NEXT:    pushl {{[0-9]+}}(%esp)
; X32-NEXT:    pushl {{[0-9]+}}(%esp)
; X32-NEXT:    pushl {{[0-9]+}}(%esp)
; X32-NEXT:    pushl {{[0-9]+}}(%esp)
; X32-NEXT:    calll __gttf2
; X32-NEXT:    addl $32, %esp
; X32-NEXT:    xorl %ecx, %ecx
; X32-NEXT:    testl %eax, %eax
; X32-NEXT:    setg %cl
; X32-NEXT:    movl %ecx, %eax
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestConst128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-AVX-NEXT:    callq __gttf2@PLT
; X64-AVX-NEXT:    xorl %ecx, %ecx
; X64-AVX-NEXT:    testl %eax, %eax
; X64-AVX-NEXT:    setg %cl
; X64-AVX-NEXT:    movl %ecx, %eax
; X64-AVX-NEXT:    popq %rcx
; X64-AVX-NEXT:    retq
entry:
  %cmp = fcmp ogt fp128 %v, 0xL00000000000000003FFF000000000000
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}
define dso_local i32 @TestConst128Zero(fp128 %v) nounwind {
; X64-SSE-LABEL: TestConst128Zero:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    xorps %xmm1, %xmm1
; X64-SSE-NEXT:    callq __gttf2@PLT
; X64-SSE-NEXT:    xorl %ecx, %ecx
; X64-SSE-NEXT:    testl %eax, %eax
; X64-SSE-NEXT:    setg %cl
; X64-SSE-NEXT:    movl %ecx, %eax
; X64-SSE-NEXT:    popq %rcx
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestConst128Zero:
; X32:       # %bb.0: # %entry
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    pushl $0
; X32-NEXT:    pushl $0
; X32-NEXT:    pushl $0
; X32-NEXT:    pushl $0
; X32-NEXT:    pushl {{[0-9]+}}(%esp)
; X32-NEXT:    pushl {{[0-9]+}}(%esp)
; X32-NEXT:    pushl {{[0-9]+}}(%esp)
; X32-NEXT:    pushl {{[0-9]+}}(%esp)
; X32-NEXT:    calll __gttf2
; X32-NEXT:    addl $32, %esp
; X32-NEXT:    xorl %ecx, %ecx
; X32-NEXT:    testl %eax, %eax
; X32-NEXT:    setg %cl
; X32-NEXT:    movl %ecx, %eax
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestConst128Zero:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT:    callq __gttf2@PLT
; X64-AVX-NEXT:    xorl %ecx, %ecx
; X64-AVX-NEXT:    testl %eax, %eax
; X64-AVX-NEXT:    setg %cl
; X64-AVX-NEXT:    movl %ecx, %eax
; X64-AVX-NEXT:    popq %rcx
; X64-AVX-NEXT:    retq
entry:
  %cmp = fcmp ogt fp128 %v, 0xL00000000000000000000000000000000
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

; C code:
;  struct TestBits_ieee_ext {
;    unsigned v1;
;    unsigned v2;
;  };
; union TestBits_LDU {
;   FP128 ld;
;   struct TestBits_ieee_ext bits;
; };
; int TestBits128(FP128 ld) {
;   union TestBits_LDU u;
;   u.ld = ld * ld;
;   return ((u.bits.v1 | u.bits.v2) == 0);
; }
define dso_local i32 @TestBits128(fp128 %ld) nounwind {
; X64-SSE-LABEL: TestBits128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    subq $24, %rsp
; X64-SSE-NEXT:    movaps %xmm0, %xmm1
; X64-SSE-NEXT:    callq __multf3@PLT
; X64-SSE-NEXT:    movaps %xmm0, (%rsp)
; X64-SSE-NEXT:    movq (%rsp), %rcx
; X64-SSE-NEXT:    movq %rcx, %rdx
; X64-SSE-NEXT:    shrq $32, %rdx
; X64-SSE-NEXT:    xorl %eax, %eax
; X64-SSE-NEXT:    orl %ecx, %edx
; X64-SSE-NEXT:    sete %al
; X64-SSE-NEXT:    addq $24, %rsp
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestBits128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $20, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    pushl %edx
; X32-NEXT:    pushl %ecx
; X32-NEXT:    pushl %eax
; X32-NEXT:    pushl %esi
; X32-NEXT:    pushl %edx
; X32-NEXT:    pushl %ecx
; X32-NEXT:    pushl %eax
; X32-NEXT:    pushl %edi
; X32-NEXT:    calll __multf3
; X32-NEXT:    addl $44, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    xorl %eax, %eax
; X32-NEXT:    orl (%esp), %ecx
; X32-NEXT:    sete %al
; X32-NEXT:    addl $20, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    popl %edi
; X32-NEXT:    retl
;
; X64-AVX-LABEL: TestBits128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    subq $24, %rsp
; X64-AVX-NEXT:    vmovaps %xmm0, %xmm1
; X64-AVX-NEXT:    callq __multf3@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, (%rsp)
; X64-AVX-NEXT:    movq (%rsp), %rcx
; X64-AVX-NEXT:    movq %rcx, %rdx
; X64-AVX-NEXT:    shrq $32, %rdx
; X64-AVX-NEXT:    xorl %eax, %eax
; X64-AVX-NEXT:    orl %ecx, %edx
; X64-AVX-NEXT:    sete %al
; X64-AVX-NEXT:    addq $24, %rsp
; X64-AVX-NEXT:    retq
entry:
  %mul = fmul fp128 %ld, %ld
  %0 = bitcast fp128 %mul to i128
  %shift = lshr i128 %0, 32
  %or5 = or i128 %shift, %0
  %or = trunc i128 %or5 to i32
  %cmp = icmp eq i32 %or, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
; If TestBits128 fails due to any LLVM or Clang change,
; please make sure the original simplified C code above
; still compiles into correct IR and assembly, not just
; this TestBits128 test case. Better yet, try to test
; the whole libm and its test cases.
}

; C code: (compiled with -target x86_64-linux-android)
; typedef long double __float128;
; __float128 TestPair128(unsigned long a, unsigned long b) {
;   unsigned __int128 n;
;   unsigned __int128 v1 = ((unsigned __int128)a << 64);
;   unsigned __int128 v2 = (unsigned __int128)b;
;   n = (v1 | v2) + 3;
;   return *(__float128*)&n;
; }
define fp128 @TestPair128(i64 %a, i64 %b) nounwind {
; X64-SSE-LABEL: TestPair128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    addq $3, %rsi
; X64-SSE-NEXT:    movq %rsi, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT:    adcq $0, %rdi
; X64-SSE-NEXT:    movq %rdi, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT:    movaps -{{[0-9]+}}(%rsp), %xmm0
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestPair128:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X32-NEXT:    addl $3, %ecx
; X32-NEXT:    adcl $0, %edx
; X32-NEXT:    adcl $0, %esi
; X32-NEXT:    adcl $0, %edi
; X32-NEXT:    movl %edx, 4(%eax)
; X32-NEXT:    movl %ecx, (%eax)
; X32-NEXT:    movl %esi, 8(%eax)
; X32-NEXT:    movl %edi, 12(%eax)
; X32-NEXT:    popl %esi
; X32-NEXT:    popl %edi
; X32-NEXT:    retl $4
;
; X64-AVX-LABEL: TestPair128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    addq $3, %rsi
; X64-AVX-NEXT:    movq %rsi, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT:    adcq $0, %rdi
; X64-AVX-NEXT:    movq %rdi, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm0
; X64-AVX-NEXT:    retq
entry:
  %conv = zext i64 %a to i128
  %shl = shl nuw i128 %conv, 64
  %conv1 = zext i64 %b to i128
  %or = or i128 %shl, %conv1
  %add = add i128 %or, 3
  %0 = bitcast i128 %add to fp128
  ret fp128 %0
}

define fp128 @TestTruncCopysign(fp128 %x, i32 %n) nounwind {
; X64-SSE-LABEL: TestTruncCopysign:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    cmpl $50001, %edi # imm = 0xC351
; X64-SSE-NEXT:    jl .LBB26_2
; X64-SSE-NEXT:  # %bb.1: # %if.then
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    callq __trunctfdf2@PLT
; X64-SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE-NEXT:    orps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE-NEXT:    callq __extenddftf2@PLT
; X64-SSE-NEXT:    addq $8, %rsp
; X64-SSE-NEXT:  .LBB26_2: # %cleanup
; X64-SSE-NEXT:    retq
;
; X32-LABEL: TestTruncCopysign:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $36, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    cmpl $50001, {{[0-9]+}}(%esp) # imm = 0xC351
; X32-NEXT:    jl .LBB26_4
; X32-NEXT:  # %bb.1: # %if.then
; X32-NEXT:    pushl %eax
; X32-NEXT:    pushl %ecx
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %edx
; X32-NEXT:    calll __trunctfdf2
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    fstpl {{[0-9]+}}(%esp)
; X32-NEXT:    testb $-128, {{[0-9]+}}(%esp)
; X32-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
; X32-NEXT:    flds {{\.?LCPI[0-9]+_[0-9]+}}
; X32-NEXT:    jne .LBB26_3
; X32-NEXT:  # %bb.2: # %if.then
; X32-NEXT:    fstp %st(1)
; X32-NEXT:    fldz
; X32-NEXT:  .LBB26_3: # %if.then
; X32-NEXT:    fstp %st(0)
; X32-NEXT:    subl $16, %esp
; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl %eax, (%esp)
; X32-NEXT:    fstpl {{[0-9]+}}(%esp)
; X32-NEXT:    calll __extenddftf2
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X32-NEXT:  .LBB26_4: # %cleanup
; X32-NEXT:    movl %edx, (%esi)
; X32-NEXT:    movl %edi, 4(%esi)
; X32-NEXT:    movl %ecx, 8(%esi)
; X32-NEXT:    movl %eax, 12(%esi)
; X32-NEXT:    movl %esi, %eax
; X32-NEXT:    addl $36, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    popl %edi
; X32-NEXT:    retl $4
;
; X64-AVX-LABEL: TestTruncCopysign:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    cmpl $50001, %edi # imm = 0xC351
; X64-AVX-NEXT:    jl .LBB26_2
; X64-AVX-NEXT:  # %bb.1: # %if.then
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    callq __trunctfdf2@PLT
; X64-AVX-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT:    vmovddup {{.*#+}} xmm1 = [+Inf,+Inf]
; X64-AVX-NEXT:    # xmm1 = mem[0,0]
; X64-AVX-NEXT:    vorps %xmm0, %xmm1, %xmm0
; X64-AVX-NEXT:    callq __extenddftf2@PLT
; X64-AVX-NEXT:    addq $8, %rsp
; X64-AVX-NEXT:  .LBB26_2: # %cleanup
; X64-AVX-NEXT:    retq
entry:
  %cmp = icmp sgt i32 %n, 50000
  br i1 %cmp, label %if.then, label %cleanup

if.then:                                          ; preds = %entry
  %conv = fptrunc fp128 %x to double
  %call = tail call double @copysign(double 0x7FF0000000000000, double %conv) #2
  %conv1 = fpext double %call to fp128
  br label %cleanup

cleanup:                                          ; preds = %entry, %if.then
  %retval.0 = phi fp128 [ %conv1, %if.then ], [ %x, %entry ]
  ret fp128 %retval.0
}

define i1 @PR34866(i128 %x) nounwind {
; X64-SSE-LABEL: PR34866:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    xorps %xmm0, %xmm0
; X64-SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT:    xorq -{{[0-9]+}}(%rsp), %rsi
; X64-SSE-NEXT:    xorq -{{[0-9]+}}(%rsp), %rdi
; X64-SSE-NEXT:    orq %rsi, %rdi
; X64-SSE-NEXT:    sete %al
; X64-SSE-NEXT:    retq
;
; X32-LABEL: PR34866:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    orl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    orl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    orl %ecx, %eax
; X32-NEXT:    sete %al
; X32-NEXT:    retl
;
; X64-AVX-LABEL: PR34866:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT:    xorq -{{[0-9]+}}(%rsp), %rsi
; X64-AVX-NEXT:    xorq -{{[0-9]+}}(%rsp), %rdi
; X64-AVX-NEXT:    orq %rsi, %rdi
; X64-AVX-NEXT:    sete %al
; X64-AVX-NEXT:    retq
  %bc_mmx = bitcast fp128 0xL00000000000000000000000000000000 to i128
  %cmp = icmp eq i128 %bc_mmx, %x
  ret i1 %cmp
}

define i1 @PR34866_commute(i128 %x) nounwind {
; X64-SSE-LABEL: PR34866_commute:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    xorps %xmm0, %xmm0
; X64-SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT:    xorq -{{[0-9]+}}(%rsp), %rsi
; X64-SSE-NEXT:    xorq -{{[0-9]+}}(%rsp), %rdi
; X64-SSE-NEXT:    orq %rsi, %rdi
; X64-SSE-NEXT:    sete %al
; X64-SSE-NEXT:    retq
;
; X32-LABEL: PR34866_commute:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    orl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    orl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    orl %ecx, %eax
; X32-NEXT:    sete %al
; X32-NEXT:    retl
;
; X64-AVX-LABEL: PR34866_commute:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT:    xorq -{{[0-9]+}}(%rsp), %rsi
; X64-AVX-NEXT:    xorq -{{[0-9]+}}(%rsp), %rdi
; X64-AVX-NEXT:    orq %rsi, %rdi
; X64-AVX-NEXT:    sete %al
; X64-AVX-NEXT:    retq
  %bc_mmx = bitcast fp128 0xL00000000000000000000000000000000 to i128
  %cmp = icmp eq i128 %x, %bc_mmx
  ret i1 %cmp
}

declare double @copysign(double, double) #1

attributes #2 = { nounwind readnone }