; RUN: llc -float-abi soft -mattr=+fp16 < %s | FileCheck %s --check-prefix=CHECK --check-prefix=SOFT
; RUN: llc -float-abi hard -mattr=+fp16 < %s | FileCheck %s --check-prefix=CHECK --check-prefix=HARD

target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "armv7a--none-eabi"

; Half-precision add lowered through f32: the i16 half payloads arrive/leave
; packed in the low 16 bits of float (i32) argument/return slots. Codegen is
; checked for both float ABIs: soft passes the values in r0/r1, hard passes
; them directly in s0/s1 and must re-materialize the zero-extended half in s0.
define float @foo(float %a.coerce, float %b.coerce) {
entry:
  ; Unpack operand a: low 16 bits of the i32-coerced float are the half value.
  %0 = bitcast float %a.coerce to i32
  %tmp.0.extract.trunc = trunc i32 %0 to i16
  %1 = bitcast i16 %tmp.0.extract.trunc to half
  ; Unpack operand b the same way.
  %2 = bitcast float %b.coerce to i32
  %tmp1.0.extract.trunc = trunc i32 %2 to i16
  %3 = bitcast i16 %tmp1.0.extract.trunc to half
  ; The half add itself; +fp16 has no f16 arithmetic, so this must be
  ; promoted to f32 (vcvtb.f32.f16 / vadd.f32 / vcvtb.f16.f32).
  %4 = fadd half %1, %3
  ; Repack the result: zero-extend the half bits into the i32 return slot.
  %5 = bitcast half %4 to i16
  %tmp5.0.insert.ext = zext i16 %5 to i32
  %6 = bitcast i32 %tmp5.0.insert.ext to float
  ret float %6
; CHECK: foo:

; SOFT: vmov    {{s[0-9]+}}, r1
; SOFT: vmov    {{s[0-9]+}}, r0
; SOFT: vcvtb.f32.f16   {{s[0-9]+}}, {{s[0-9]+}}
; SOFT: vcvtb.f32.f16   {{s[0-9]+}}, {{s[0-9]+}}
; SOFT: vadd.f32        {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
; SOFT: vcvtb.f16.f32   {{s[0-9]+}}, {{s[0-9]+}}
; SOFT: vmov    r0, {{s[0-9]+}}

; HARD-NOT: vmov
; HARD-NOT: uxth
; HARD: vcvtb.f32.f16   {{s[0-9]+}}, s1
; HARD: vcvtb.f32.f16   {{s[0-9]+}}, s0
; HARD: vadd.f32        {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
; HARD: vcvtb.f16.f32   [[SREG:s[0-9]+]], {{s[0-9]+}}
; HARD-NEXT: vmov            [[REG0:r[0-9]+]], [[SREG]]
; HARD-NEXT: uxth            [[REG1:r[0-9]+]], [[REG0]]
; HARD-NEXT: vmov            s0, [[REG1]]

; CHECK: bx lr
}