; RUN: llc < %s -mtriple=armv8 -mattr=+neon | FileCheck %s
; Check that the llvm.arm.neon.vcvtas intrinsic selects vcvta.s32.f32 (Q regs).
define <4 x i32> @vcvtasq(<4 x float>* %A) {
; CHECK: vcvtasq
; CHECK: vcvta.s32.f32 q{{[0-9]+}}, q{{[0-9]+}}
  %tmp1 = load <4 x float>, <4 x float>* %A
  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float> %tmp1)
  ret <4 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtas intrinsic selects vcvta.s32.f32 (D regs).
define <2 x i32> @vcvtasd(<2 x float>* %A) {
; CHECK: vcvtasd
; CHECK: vcvta.s32.f32 d{{[0-9]+}}, d{{[0-9]+}}
  %tmp1 = load <2 x float>, <2 x float>* %A
  %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtas.v2i32.v2f32(<2 x float> %tmp1)
  ret <2 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtns intrinsic selects vcvtn.s32.f32 (Q regs).
define <4 x i32> @vcvtnsq(<4 x float>* %A) {
; CHECK: vcvtnsq
; CHECK: vcvtn.s32.f32 q{{[0-9]+}}, q{{[0-9]+}}
  %tmp1 = load <4 x float>, <4 x float>* %A
  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtns.v4i32.v4f32(<4 x float> %tmp1)
  ret <4 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtns intrinsic selects vcvtn.s32.f32 (D regs).
define <2 x i32> @vcvtnsd(<2 x float>* %A) {
; CHECK: vcvtnsd
; CHECK: vcvtn.s32.f32 d{{[0-9]+}}, d{{[0-9]+}}
  %tmp1 = load <2 x float>, <2 x float>* %A
  %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtns.v2i32.v2f32(<2 x float> %tmp1)
  ret <2 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtps intrinsic selects vcvtp.s32.f32 (Q regs).
define <4 x i32> @vcvtpsq(<4 x float>* %A) {
; CHECK: vcvtpsq
; CHECK: vcvtp.s32.f32 q{{[0-9]+}}, q{{[0-9]+}}
  %tmp1 = load <4 x float>, <4 x float>* %A
  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtps.v4i32.v4f32(<4 x float> %tmp1)
  ret <4 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtps intrinsic selects vcvtp.s32.f32 (D regs).
define <2 x i32> @vcvtpsd(<2 x float>* %A) {
; CHECK: vcvtpsd
; CHECK: vcvtp.s32.f32 d{{[0-9]+}}, d{{[0-9]+}}
  %tmp1 = load <2 x float>, <2 x float>* %A
  %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtps.v2i32.v2f32(<2 x float> %tmp1)
  ret <2 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtms intrinsic selects vcvtm.s32.f32 (Q regs).
define <4 x i32> @vcvtmsq(<4 x float>* %A) {
; CHECK: vcvtmsq
; CHECK: vcvtm.s32.f32 q{{[0-9]+}}, q{{[0-9]+}}
  %tmp1 = load <4 x float>, <4 x float>* %A
  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtms.v4i32.v4f32(<4 x float> %tmp1)
  ret <4 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtms intrinsic selects vcvtm.s32.f32 (D regs).
define <2 x i32> @vcvtmsd(<2 x float>* %A) {
; CHECK: vcvtmsd
; CHECK: vcvtm.s32.f32 d{{[0-9]+}}, d{{[0-9]+}}
  %tmp1 = load <2 x float>, <2 x float>* %A
  %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtms.v2i32.v2f32(<2 x float> %tmp1)
  ret <2 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtau intrinsic selects vcvta.u32.f32 (Q regs).
define <4 x i32> @vcvtauq(<4 x float>* %A) {
; CHECK: vcvtauq
; CHECK: vcvta.u32.f32 q{{[0-9]+}}, q{{[0-9]+}}
  %tmp1 = load <4 x float>, <4 x float>* %A
  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtau.v4i32.v4f32(<4 x float> %tmp1)
  ret <4 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtau intrinsic selects vcvta.u32.f32 (D regs).
define <2 x i32> @vcvtaud(<2 x float>* %A) {
; CHECK: vcvtaud
; CHECK: vcvta.u32.f32 d{{[0-9]+}}, d{{[0-9]+}}
  %tmp1 = load <2 x float>, <2 x float>* %A
  %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtau.v2i32.v2f32(<2 x float> %tmp1)
  ret <2 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtnu intrinsic selects vcvtn.u32.f32 (Q regs).
define <4 x i32> @vcvtnuq(<4 x float>* %A) {
; CHECK: vcvtnuq
; CHECK: vcvtn.u32.f32 q{{[0-9]+}}, q{{[0-9]+}}
  %tmp1 = load <4 x float>, <4 x float>* %A
  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtnu.v4i32.v4f32(<4 x float> %tmp1)
  ret <4 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtnu intrinsic selects vcvtn.u32.f32 (D regs).
define <2 x i32> @vcvtnud(<2 x float>* %A) {
; CHECK: vcvtnud
; CHECK: vcvtn.u32.f32 d{{[0-9]+}}, d{{[0-9]+}}
  %tmp1 = load <2 x float>, <2 x float>* %A
  %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtnu.v2i32.v2f32(<2 x float> %tmp1)
  ret <2 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtpu intrinsic selects vcvtp.u32.f32 (Q regs).
define <4 x i32> @vcvtpuq(<4 x float>* %A) {
; CHECK: vcvtpuq
; CHECK: vcvtp.u32.f32 q{{[0-9]+}}, q{{[0-9]+}}
  %tmp1 = load <4 x float>, <4 x float>* %A
  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtpu.v4i32.v4f32(<4 x float> %tmp1)
  ret <4 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtpu intrinsic selects vcvtp.u32.f32 (D regs).
define <2 x i32> @vcvtpud(<2 x float>* %A) {
; CHECK: vcvtpud
; CHECK: vcvtp.u32.f32 d{{[0-9]+}}, d{{[0-9]+}}
  %tmp1 = load <2 x float>, <2 x float>* %A
  %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtpu.v2i32.v2f32(<2 x float> %tmp1)
  ret <2 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtmu intrinsic selects vcvtm.u32.f32 (Q regs).
define <4 x i32> @vcvtmuq(<4 x float>* %A) {
; CHECK: vcvtmuq
; CHECK: vcvtm.u32.f32 q{{[0-9]+}}, q{{[0-9]+}}
  %tmp1 = load <4 x float>, <4 x float>* %A
  %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtmu.v4i32.v4f32(<4 x float> %tmp1)
  ret <4 x i32> %tmp2
}

; Check that the llvm.arm.neon.vcvtmu intrinsic selects vcvtm.u32.f32 (D regs).
define <2 x i32> @vcvtmud(<2 x float>* %A) {
; CHECK: vcvtmud
; CHECK: vcvtm.u32.f32 d{{[0-9]+}}, d{{[0-9]+}}
  %tmp1 = load <2 x float>, <2 x float>* %A
  %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtmu.v2i32.v2f32(<2 x float> %tmp1)
  ret <2 x i32> %tmp2
}

; Intrinsic declarations for the rounding float-to-int conversions exercised above.
declare <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vcvtas.v2i32.v2f32(<2 x float>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vcvtns.v4i32.v4f32(<4 x float>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vcvtns.v2i32.v2f32(<2 x float>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vcvtps.v4i32.v4f32(<4 x float>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vcvtps.v2i32.v2f32(<2 x float>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vcvtms.v4i32.v4f32(<4 x float>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vcvtms.v2i32.v2f32(<2 x float>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vcvtau.v4i32.v4f32(<4 x float>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vcvtau.v2i32.v2f32(<2 x float>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vcvtnu.v4i32.v4f32(<4 x float>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vcvtnu.v2i32.v2f32(<2 x float>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vcvtpu.v4i32.v4f32(<4 x float>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vcvtpu.v2i32.v2f32(<2 x float>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vcvtmu.v4i32.v4f32(<4 x float>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vcvtmu.v2i32.v2f32(<2 x float>) nounwind readnone