; RUN: llc < %s -mtriple=thumbv7-none-eabi   -mcpu=cortex-m3                    | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=NONE
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4                    | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=SP
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7                    | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=VFP  -check-prefix=FP-ARMv8
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 -mattr=+fp-only-sp | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=SP
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a7                    | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=VFP4
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a57                   | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=FP-ARMv8
7
; sqrt: libcall without a double-precision FPU, vsqrt.f64 with one.
declare double     @llvm.sqrt.f64(double %Val)
define double @sqrt_d(double %a) {
; CHECK-LABEL: sqrt_d:
; SOFT: {{(bl|b)}} sqrt
; HARD: vsqrt.f64 d0, d0
  %1 = call double @llvm.sqrt.f64(double %a)
  ret double %1
}
16
; powi always lowers to the __powidf2 compiler-rt call (tail-called when hard-float).
declare double     @llvm.powi.f64(double %Val, i32 %power)
define double @powi_d(double %a, i32 %b) {
; CHECK-LABEL: powi_d:
; SOFT: {{(bl|b)}} __powidf2
; HARD: b __powidf2
  %1 = call double @llvm.powi.f64(double %a, i32 %b)
  ret double %1
}
25
declare double     @llvm.sin.f64(double %Val)
define double @sin_d(double %a) {
; CHECK-LABEL: sin_d:
; SOFT: {{(bl|b)}} sin
; HARD: b sin
  %1 = call double @llvm.sin.f64(double %a)
  ret double %1
}
34
declare double     @llvm.cos.f64(double %Val)
define double @cos_d(double %a) {
; CHECK-LABEL: cos_d:
; SOFT: {{(bl|b)}} cos
; HARD: b cos
  %1 = call double @llvm.cos.f64(double %a)
  ret double %1
}
43
declare double     @llvm.pow.f64(double %Val, double %power)
define double @pow_d(double %a, double %b) {
; CHECK-LABEL: pow_d:
; SOFT: {{(bl|b)}} pow
; HARD: b pow
  %1 = call double @llvm.pow.f64(double %a, double %b)
  ret double %1
}
52
declare double     @llvm.exp.f64(double %Val)
define double @exp_d(double %a) {
; CHECK-LABEL: exp_d:
; SOFT: {{(bl|b)}} exp
; HARD: b exp
  %1 = call double @llvm.exp.f64(double %a)
  ret double %1
}
61
declare double     @llvm.exp2.f64(double %Val)
define double @exp2_d(double %a) {
; CHECK-LABEL: exp2_d:
; SOFT: {{(bl|b)}} exp2
; HARD: b exp2
  %1 = call double @llvm.exp2.f64(double %a)
  ret double %1
}
70
declare double     @llvm.log.f64(double %Val)
define double @log_d(double %a) {
; CHECK-LABEL: log_d:
; SOFT: {{(bl|b)}} log
; HARD: b log
  %1 = call double @llvm.log.f64(double %a)
  ret double %1
}
79
declare double     @llvm.log10.f64(double %Val)
define double @log10_d(double %a) {
; CHECK-LABEL: log10_d:
; SOFT: {{(bl|b)}} log10
; HARD: b log10
  %1 = call double @llvm.log10.f64(double %a)
  ret double %1
}
88
declare double     @llvm.log2.f64(double %Val)
define double @log2_d(double %a) {
; CHECK-LABEL: log2_d:
; SOFT: {{(bl|b)}} log2
; HARD: b log2
  %1 = call double @llvm.log2.f64(double %a)
  ret double %1
}
97
; fma: libcall without double-precision hardware, fused vfma.f64 with it.
declare double     @llvm.fma.f64(double %a, double %b, double %c)
define double @fma_d(double %a, double %b, double %c) {
; CHECK-LABEL: fma_d:
; SOFT: {{(bl|b)}} fma
; HARD: vfma.f64
  %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
  ret double %1
}
106
; FIXME: the FPv4-SP version is less efficient than the no-FPU version
declare double     @llvm.fabs.f64(double %Val)
define double @abs_d(double %a) {
; CHECK-LABEL: abs_d:
; NONE: bic r1, r1, #-2147483648
; SP: bl __aeabi_dcmpgt
; SP: bl __aeabi_dcmpun
; SP: bl __aeabi_dsub
; DP: vabs.f64 d0, d0
  %1 = call double @llvm.fabs.f64(double %a)
  ret double %1
}
119
; copysign: integer sign-bit insertion (lsrs/bfi) unless NEON can do a vector bit-select.
declare double     @llvm.copysign.f64(double  %Mag, double  %Sgn)
define double @copysign_d(double %a, double %b) {
; CHECK-LABEL: copysign_d:
; SOFT: lsrs [[REG:r[0-9]+]], r3, #31
; SOFT: bfi r1, [[REG]], #31, #1
; VFP: lsrs [[REG:r[0-9]+]], r3, #31
; VFP: bfi r1, [[REG]], #31, #1
; NEON: vmov.i32 [[REG:d[0-9]+]], #0x80000000
; NEON: vshl.i64 [[REG]], [[REG]], #32
; NEON: vbsl [[REG]], d
  %1 = call double @llvm.copysign.f64(double %a, double %b)
  ret double %1
}
133
; Rounding intrinsics need FP-ARMv8's vrint* instructions; older FPUs call libm.
declare double     @llvm.floor.f64(double %Val)
define double @floor_d(double %a) {
; CHECK-LABEL: floor_d:
; SOFT: {{(bl|b)}} floor
; VFP4: b floor
; FP-ARMv8: vrintm.f64
  %1 = call double @llvm.floor.f64(double %a)
  ret double %1
}
143
declare double     @llvm.ceil.f64(double %Val)
define double @ceil_d(double %a) {
; CHECK-LABEL: ceil_d:
; SOFT: {{(bl|b)}} ceil
; VFP4: b ceil
; FP-ARMv8: vrintp.f64
  %1 = call double @llvm.ceil.f64(double %a)
  ret double %1
}
153
declare double     @llvm.trunc.f64(double %Val)
define double @trunc_d(double %a) {
; CHECK-LABEL: trunc_d:
; SOFT: {{(bl|b)}} trunc
; VFP4: b trunc
; FP-ARMv8: vrintz.f64
  %1 = call double @llvm.trunc.f64(double %a)
  ret double %1
}
163
declare double     @llvm.rint.f64(double %Val)
define double @rint_d(double %a) {
; CHECK-LABEL: rint_d:
; SOFT: {{(bl|b)}} rint
; VFP4: b rint
; FP-ARMv8: vrintx.f64
  %1 = call double @llvm.rint.f64(double %a)
  ret double %1
}
173
declare double     @llvm.nearbyint.f64(double %Val)
define double @nearbyint_d(double %a) {
; CHECK-LABEL: nearbyint_d:
; SOFT: {{(bl|b)}} nearbyint
; VFP4: b nearbyint
; FP-ARMv8: vrintr.f64
  %1 = call double @llvm.nearbyint.f64(double %a)
  ret double %1
}
183
declare double     @llvm.round.f64(double %Val)
define double @round_d(double %a) {
; CHECK-LABEL: round_d:
; SOFT: {{(bl|b)}} round
; VFP4: b round
; FP-ARMv8: vrinta.f64
  %1 = call double @llvm.round.f64(double %a)
  ret double %1
}
193
; fmuladd may be split into mul+add (VFP4) or contracted to vmla (FP-ARMv8).
declare double     @llvm.fmuladd.f64(double %a, double %b, double %c)
define double @fmuladd_d(double %a, double %b, double %c) {
; CHECK-LABEL: fmuladd_d:
; SOFT: bl __aeabi_dmul
; SOFT: bl __aeabi_dadd
; VFP4: vmul.f64
; VFP4: vadd.f64
; FP-ARMv8: vmla.f64
  %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
  ret double %1
}
205
; double -> half: runtime call unless FP-ARMv8 provides a direct f16<-f64 convert.
declare i16 @llvm.convert.to.fp16.f64(double %a)
define i16 @d_to_h(double %a) {
; CHECK-LABEL: d_to_h:
; SOFT: bl __aeabi_d2h
; VFP4: bl __aeabi_d2h
; FP-ARMv8: vcvt{{[bt]}}.f16.f64
  %1 = call i16 @llvm.convert.to.fp16.f64(double %a)
  ret i16 %1
}
215
declare double @llvm.convert.from.fp16.f64(i16 %a)
define double @h_to_d(i16 %a) {
; CHECK-LABEL: h_to_d:
; NONE: bl __gnu_h2f_ieee
; NONE: bl __aeabi_f2d
; SP: vcvt{{[bt]}}.f32.f16
; SP: bl __aeabi_f2d
; VFP4: vcvt{{[bt]}}.f32.f16
; VFP4: vcvt.f64.f32
; FP-ARMv8: vcvt{{[bt]}}.f64.f16
  %1 = call double @llvm.convert.from.fp16.f64(i16 %a)
  ret double %1
}
229