; RUN: llc < %s -mtriple=thumbv7-none-eabi   -mcpu=cortex-m3                    | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=NONE
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4                    | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=SP
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7                    | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=VFP  -check-prefix=FP-ARMv8
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 -mattr=-fp64 | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=SP
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a7                    | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=VFP4
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a57                   | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=FP-ARMv8
7
declare double     @llvm.sqrt.f64(double %Val)
; Soft-float configs call the sqrt libcall; hard-float double-precision
; targets select a single vsqrt.f64.
define double @sqrt_d(double %a) {
; CHECK-LABEL: sqrt_d:
; SOFT: {{(bl|b)}} sqrt
; HARD: vsqrt.f64 d0, d0
  %1 = call double @llvm.sqrt.f64(double %a)
  ret double %1
}
16
declare double     @llvm.powi.f64.i32(double %Val, i32 %power)
; powi always lowers to the compiler-rt __powidf2 call; hard-float can
; tail-call it (plain b) instead of bl.
define double @powi_d(double %a, i32 %b) {
; CHECK-LABEL: powi_d:
; SOFT: {{(bl|b)}} __powidf2
; HARD: b __powidf2
  %1 = call double @llvm.powi.f64.i32(double %a, i32 %b)
  ret double %1
}
25
declare double     @llvm.sin.f64(double %Val)
; No ARM instruction for sin; always a libm call (tail-called on hard-float).
define double @sin_d(double %a) {
; CHECK-LABEL: sin_d:
; SOFT: {{(bl|b)}} sin
; HARD: b sin
  %1 = call double @llvm.sin.f64(double %a)
  ret double %1
}
34
declare double     @llvm.cos.f64(double %Val)
; No ARM instruction for cos; always a libm call (tail-called on hard-float).
define double @cos_d(double %a) {
; CHECK-LABEL: cos_d:
; SOFT: {{(bl|b)}} cos
; HARD: b cos
  %1 = call double @llvm.cos.f64(double %a)
  ret double %1
}
43
declare double     @llvm.pow.f64(double %Val, double %power)
; No ARM instruction for pow; always a libm call (tail-called on hard-float).
define double @pow_d(double %a, double %b) {
; CHECK-LABEL: pow_d:
; SOFT: {{(bl|b)}} pow
; HARD: b pow
  %1 = call double @llvm.pow.f64(double %a, double %b)
  ret double %1
}
52
declare double     @llvm.exp.f64(double %Val)
; No ARM instruction for exp; always a libm call (tail-called on hard-float).
define double @exp_d(double %a) {
; CHECK-LABEL: exp_d:
; SOFT: {{(bl|b)}} exp
; HARD: b exp
  %1 = call double @llvm.exp.f64(double %a)
  ret double %1
}
61
declare double     @llvm.exp2.f64(double %Val)
; No ARM instruction for exp2; always a libm call (tail-called on hard-float).
define double @exp2_d(double %a) {
; CHECK-LABEL: exp2_d:
; SOFT: {{(bl|b)}} exp2
; HARD: b exp2
  %1 = call double @llvm.exp2.f64(double %a)
  ret double %1
}
70
declare double     @llvm.log.f64(double %Val)
; No ARM instruction for log; always a libm call (tail-called on hard-float).
define double @log_d(double %a) {
; CHECK-LABEL: log_d:
; SOFT: {{(bl|b)}} log
; HARD: b log
  %1 = call double @llvm.log.f64(double %a)
  ret double %1
}
79
declare double     @llvm.log10.f64(double %Val)
; No ARM instruction for log10; always a libm call (tail-called on hard-float).
define double @log10_d(double %a) {
; CHECK-LABEL: log10_d:
; SOFT: {{(bl|b)}} log10
; HARD: b log10
  %1 = call double @llvm.log10.f64(double %a)
  ret double %1
}
88
declare double     @llvm.log2.f64(double %Val)
; No ARM instruction for log2; always a libm call (tail-called on hard-float).
define double @log2_d(double %a) {
; CHECK-LABEL: log2_d:
; SOFT: {{(bl|b)}} log2
; HARD: b log2
  %1 = call double @llvm.log2.f64(double %a)
  ret double %1
}
97
declare double     @llvm.fma.f64(double %a, double %b, double %c)
; fma selects the fused vfma.f64 when double-precision hardware is
; available; otherwise it libcalls fma.
define double @fma_d(double %a, double %b, double %c) {
; CHECK-LABEL: fma_d:
; SOFT: {{(bl|b)}} fma
; HARD: vfma.f64
  %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
  ret double %1
}
106
; FIXME: the FPv4-SP version is less efficient than the no-FPU version
declare double     @llvm.fabs.f64(double %Val)
; fabs is a sign-bit clear. With no FPU it is a single bic on the high
; word; single-precision-only FPUs bounce through core registers; with
; double-precision hardware it is vabs.f64.
define double @abs_d(double %a) {
; CHECK-LABEL: abs_d:
; NONE: bic r1, r1, #-2147483648
; SP: vldr [[D1:d[0-9]+]], .LCPI{{.*}}
; SP-DAG: vmov [[R2:r[0-9]+]], [[R3:r[0-9]+]], [[D1]]
; SP-DAG: vmov [[R0:r[0-9]+]], [[R1:r[0-9]+]], [[D0:d[0-9]+]]
; SP: lsrs [[R4:r[0-9]+]], [[R3]], #31
; SP: bfi [[R5:r[0-9]+]], [[R4]], #31, #1
; SP: vmov [[D0]], [[R0]], [[R5]]
; DP: vabs.f64 d0, d0
  %1 = call double @llvm.fabs.f64(double %a)
  ret double %1
}
122
declare double     @llvm.copysign.f64(double  %Mag, double  %Sgn)
; copysign transplants the sign bit of %b into %a: with core registers it
; is an lsrs/bfi pair on the high words; NEON targets build a sign-bit-only
; mask and vbit it in.
define double @copysign_d(double %a, double %b) {
; CHECK-LABEL: copysign_d:
; SOFT: lsrs [[REG:r[0-9]+]], {{r[0-9]+}}, #31
; SOFT: bfi {{r[0-9]+}}, [[REG]], #31, #1
; VFP: lsrs [[REG:r[0-9]+]], {{r[0-9]+}}, #31
; VFP: bfi {{r[0-9]+}}, [[REG]], #31, #1
; NEON:         vmov.i32 d16, #0x80000000
; NEON-NEXT:    vshl.i64 d16, d16, #32
; NEON-NEXT:    vbit d0, d1, d16
; NEON-NEXT:    bx lr
  %1 = call double @llvm.copysign.f64(double %a, double %b)
  ret double %1
}
137
declare double     @llvm.floor.f64(double %Val)
; Only FP-ARMv8 has a round-towards-minus-infinity instruction (vrintm);
; everything older libcalls floor.
define double @floor_d(double %a) {
; CHECK-LABEL: floor_d:
; SOFT: {{(bl|b)}} floor
; VFP4: b floor
; FP-ARMv8: vrintm.f64
  %1 = call double @llvm.floor.f64(double %a)
  ret double %1
}
147
declare double     @llvm.ceil.f64(double %Val)
; Only FP-ARMv8 has a round-towards-plus-infinity instruction (vrintp);
; everything older libcalls ceil.
define double @ceil_d(double %a) {
; CHECK-LABEL: ceil_d:
; SOFT: {{(bl|b)}} ceil
; VFP4: b ceil
; FP-ARMv8: vrintp.f64
  %1 = call double @llvm.ceil.f64(double %a)
  ret double %1
}
157
declare double     @llvm.trunc.f64(double %Val)
; Only FP-ARMv8 has a round-towards-zero instruction (vrintz); everything
; older libcalls trunc. The second prefix was previously misspelled
; "FFP4", which is not registered in any RUN line, so that check was
; silently never executed; corrected to match the floor/ceil/rint tests.
define double @trunc_d(double %a) {
; CHECK-LABEL: trunc_d:
; SOFT: {{(bl|b)}} trunc
; VFP4: b trunc
; FP-ARMv8: vrintz.f64
  %1 = call double @llvm.trunc.f64(double %a)
  ret double %1
}
167
declare double     @llvm.rint.f64(double %Val)
; Only FP-ARMv8 has a round-to-nearest-with-inexact instruction (vrintx);
; everything older libcalls rint.
define double @rint_d(double %a) {
; CHECK-LABEL: rint_d:
; SOFT: {{(bl|b)}} rint
; VFP4: b rint
; FP-ARMv8: vrintx.f64
  %1 = call double @llvm.rint.f64(double %a)
  ret double %1
}
177
declare double     @llvm.nearbyint.f64(double %Val)
; nearbyint (no inexact exception) maps to vrintr on FP-ARMv8; everything
; older libcalls nearbyint.
define double @nearbyint_d(double %a) {
; CHECK-LABEL: nearbyint_d:
; SOFT: {{(bl|b)}} nearbyint
; VFP4: b nearbyint
; FP-ARMv8: vrintr.f64
  %1 = call double @llvm.nearbyint.f64(double %a)
  ret double %1
}
187
declare double     @llvm.round.f64(double %Val)
; round (ties away from zero) maps to vrinta on FP-ARMv8; everything
; older libcalls round.
define double @round_d(double %a) {
; CHECK-LABEL: round_d:
; SOFT: {{(bl|b)}} round
; VFP4: b round
; FP-ARMv8: vrinta.f64
  %1 = call double @llvm.round.f64(double %a)
  ret double %1
}
197
declare double     @llvm.fmuladd.f64(double %a, double %b, double %c)
; fmuladd may be fused or not. Soft-float expands to the two AEABI
; libcalls, cortex-a7 keeps separate vmul/vadd, and FP-ARMv8 fuses into
; vfma.f64.
define double @fmuladd_d(double %a, double %b, double %c) {
; CHECK-LABEL: fmuladd_d:
; SOFT: bl __aeabi_dmul
; SOFT: bl __aeabi_dadd
; VFP4: vmul.f64
; VFP4: vadd.f64
; FP-ARMv8: vfma.f64
  %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
  ret double %1
}
209
declare i16 @llvm.convert.to.fp16.f64(double %a)
; double-to-half needs the __aeabi_d2h helper unless the target has the
; direct f64-to-f16 conversion (FP-ARMv8 vcvtb/vcvtt).
define i16 @d_to_h(double %a) {
; CHECK-LABEL: d_to_h:
; SOFT: bl __aeabi_d2h
; VFP4: bl __aeabi_d2h
; FP-ARMv8: vcvt{{[bt]}}.f16.f64
  %1 = call i16 @llvm.convert.to.fp16.f64(double %a)
  ret i16 %1
}
219
declare double @llvm.convert.from.fp16.f64(i16 %a)
; half-to-double goes via f32 unless the target has a direct f16-to-f64
; conversion (FP-ARMv8 vcvtb/vcvtt). The two cortex-a7 checks previously
; used the prefix "VFPv4", which is not registered in any RUN line
; (the RUN lines define "VFP4"), so they were silently never executed;
; corrected so they run.
define double @h_to_d(i16 %a) {
; CHECK-LABEL: h_to_d:
; NONE: bl __aeabi_h2f
; NONE: bl __aeabi_f2d
; SP: vcvt{{[bt]}}.f32.f16
; SP: bl __aeabi_f2d
; VFP4: vcvt{{[bt]}}.f32.f16
; VFP4: vcvt.f64.f32
; FP-ARMv8: vcvt{{[bt]}}.f64.f16
  %1 = call double @llvm.convert.from.fp16.f64(i16 %a)
  ret double %1
}
233