; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 -mattr=+soft-float -mips16-hard-float -relocation-model=static  < %s | FileCheck %s -check-prefix=__call_stub_fp___fixunsdfsi

; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 -mattr=+soft-float -mips16-hard-float -relocation-model=static  < %s | FileCheck %s -check-prefix=__call_stub_fp___floatdidf

; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 -mattr=+soft-float -mips16-hard-float -relocation-model=static  < %s | FileCheck %s -check-prefix=__call_stub_fp___floatdisf

; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 -mattr=+soft-float -mips16-hard-float -relocation-model=static  < %s | FileCheck %s -check-prefix=__call_stub_fp___floatundidf

; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 -mattr=+soft-float -mips16-hard-float -relocation-model=static  < %s | FileCheck %s -check-prefix=__call_stub_fp___fixsfdi

; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 -mattr=+soft-float -mips16-hard-float -relocation-model=static  < %s | FileCheck %s -check-prefix=__call_stub_fp___fixunsdfdi

; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 -mattr=+soft-float -mips16-hard-float -relocation-model=static  < %s | FileCheck %s -check-prefix=__call_stub_fp___fixdfdi

; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 -mattr=+soft-float -mips16-hard-float -relocation-model=static  < %s | FileCheck %s -check-prefix=__call_stub_fp___fixunssfsi

; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 -mattr=+soft-float -mips16-hard-float -relocation-model=static  < %s | FileCheck %s -check-prefix=__call_stub_fp___fixunssfdi

; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 -mattr=+soft-float -mips16-hard-float -relocation-model=static  < %s | FileCheck %s -check-prefix=__call_stub_fp___floatundisf
20
; Signed/unsigned 64-bit and 32-bit integer globals plus FP globals used
; as sources/sinks for the FP<->integer conversions below. (Stray line
; numbers fused onto each line by a bad extraction have been removed;
; the declarations are otherwise unchanged.)
@ll1 = global i64 0, align 8
@ll2 = global i64 0, align 8
@ll3 = global i64 0, align 8
@l1 = global i32 0, align 4
@l2 = global i32 0, align 4
@l3 = global i32 0, align 4
@ull1 = global i64 0, align 8
@ull2 = global i64 0, align 8
@ull3 = global i64 0, align 8
@ul1 = global i32 0, align 4
@ul2 = global i32 0, align 4
@ul3 = global i32 0, align 4
@d1 = global double 0.000000e+00, align 8
@d2 = global double 0.000000e+00, align 8
@d3 = global double 0.000000e+00, align 8
@d4 = global double 0.000000e+00, align 8
@f1 = global float 0.000000e+00, align 4
@f2 = global float 0.000000e+00, align 4
@f3 = global float 0.000000e+00, align 4
@f4 = global float 0.000000e+00, align 4
41
; Function Attrs: nounwind
; FP -> integer direction: each fptosi/fptoui on double/float to i64/i32
; must be lowered through a mips16 hard-float helper, so llc emits the
; corresponding __call_stub_fp___fix* stubs checked at the bottom of the
; file. (Fused line-number prefixes from a bad extraction removed; the
; IR is otherwise unchanged.)
define void @_Z3foov() #0 {
entry:
  %0 = load double, double* @d1, align 8
  %conv = fptosi double %0 to i64                 ; __fixdfdi
  store i64 %conv, i64* @ll1, align 8
  %1 = load double, double* @d2, align 8
  %conv1 = fptoui double %1 to i64                ; __fixunsdfdi
  store i64 %conv1, i64* @ull1, align 8
  %2 = load float, float* @f1, align 4
  %conv2 = fptosi float %2 to i64                 ; __fixsfdi
  store i64 %conv2, i64* @ll2, align 8
  %3 = load float, float* @f2, align 4
  %conv3 = fptoui float %3 to i64                 ; __fixunssfdi
  store i64 %conv3, i64* @ull2, align 8
  %4 = load double, double* @d3, align 8
  %conv4 = fptosi double %4 to i32
  store i32 %conv4, i32* @l1, align 4
  %5 = load double, double* @d4, align 8
  %conv5 = fptoui double %5 to i32                ; __fixunsdfsi
  store i32 %conv5, i32* @ul1, align 4
  %6 = load float, float* @f3, align 4
  %conv6 = fptosi float %6 to i32
  store i32 %conv6, i32* @l2, align 4
  %7 = load float, float* @f4, align 4
  %conv7 = fptoui float %7 to i32                 ; __fixunssfsi
  store i32 %conv7, i32* @ul2, align 4
  ret void
}
71
; Function Attrs: nounwind
; Integer -> FP direction: mirror of @_Z3foov. Each sitofp/uitofp from
; i64/i32 to double/float exercises the __call_stub_fp___float* stubs
; checked at the bottom of the file. (Fused line-number prefixes from a
; bad extraction removed; the IR is otherwise unchanged.)
define void @_Z3goov() #0 {
entry:
  %0 = load i64, i64* @ll1, align 8
  %conv = sitofp i64 %0 to double                 ; __floatdidf
  store double %conv, double* @d1, align 8
  %1 = load i64, i64* @ull1, align 8
  %conv1 = uitofp i64 %1 to double                ; __floatundidf
  store double %conv1, double* @d2, align 8
  %2 = load i64, i64* @ll2, align 8
  %conv2 = sitofp i64 %2 to float                 ; __floatdisf
  store float %conv2, float* @f1, align 4
  %3 = load i64, i64* @ull2, align 8
  %conv3 = uitofp i64 %3 to float                 ; __floatundisf
  store float %conv3, float* @f2, align 4
  %4 = load i32, i32* @l1, align 4
  %conv4 = sitofp i32 %4 to double
  store double %conv4, double* @d3, align 8
  %5 = load i32, i32* @ul1, align 4
  %conv5 = uitofp i32 %5 to double
  store double %conv5, double* @d4, align 8
  %6 = load i32, i32* @l2, align 4
  %conv6 = sitofp i32 %6 to float
  store float %conv6, float* @f3, align 4
  %7 = load i32, i32* @ul2, align 4
  %conv7 = uitofp i32 %7 to float
  store float %conv7, float* @f4, align 4
  ret void
}
101
; Attribute group for both test functions (fused line-number prefix
; removed; content otherwise unchanged).
attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
103
; Each prefixed CHECK line verifies that the corresponding mips16
; hard-float call stub label is emitted in the assembly output.
; __call_stub_fp___fixunsdfsi:  __call_stub_fp___fixunsdfsi:
; __call_stub_fp___floatdidf:  __call_stub_fp___floatdidf:
; __call_stub_fp___floatdisf:  __call_stub_fp___floatdisf:
; __call_stub_fp___floatundidf:  __call_stub_fp___floatundidf:
; __call_stub_fp___fixsfdi:  __call_stub_fp___fixsfdi:
; __call_stub_fp___fixunsdfdi:  __call_stub_fp___fixunsdfdi:
; __call_stub_fp___fixdfdi:  __call_stub_fp___fixdfdi:
; __call_stub_fp___fixunssfsi:  __call_stub_fp___fixunssfsi:
; __call_stub_fp___fixunssfdi:  __call_stub_fp___fixunssfdi:
; __call_stub_fp___floatundisf:  __call_stub_fp___floatundisf: