// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -target-feature +fullfp16 -target-feature +v8.2a \
// RUN: -fallow-half-arguments-and-returns -S -disable-O0-optnone -emit-llvm -o - %s \
// RUN: | opt -S -mem2reg \
// RUN: | FileCheck %s

// REQUIRES: aarch64-registered-target

#include <arm_neon.h>
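
// Each test below exercises a single fp16 NEON intrinsic and checks the
// LLVM IR clang emits for it; mem2reg is run over the output first so the
// -O0 allocas do not obscure the expected pattern.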

// CHECK-LABEL: test_vabs_f16
// CHECK:  [[ABS:%.*]] =  call <4 x half> @llvm.fabs.v4f16(<4 x half> %a)
// CHECK:  ret <4 x half> [[ABS]]
float16x4_t test_vabs_f16(float16x4_t a) {
  return vabs_f16(a);
}

// CHECK-LABEL: test_vabsq_f16
// CHECK:  [[ABS:%.*]] = call <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
// CHECK:  ret <8 x half> [[ABS]]
float16x8_t test_vabsq_f16(float16x8_t a) {
  return vabsq_f16(a);
}

// CHECK-LABEL: test_vceqz_f16
// CHECK:  [[TMP1:%.*]] = fcmp oeq <4 x half> %a, zeroinitializer
// CHECK:  [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK:  ret <4 x i16> [[TMP2]]
uint16x4_t test_vceqz_f16(float16x4_t a) {
  return vceqz_f16(a);
}
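
// The vector compares produce an all-ones/all-zeros mask per lane, which is
// why the i1 compare result is sign-extended to i16 above; the compare
// tests that follow all share this shape.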

// CHECK-LABEL: test_vceqzq_f16
// CHECK:  [[TMP1:%.*]] = fcmp oeq <8 x half> %a, zeroinitializer
// CHECK:  [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK:  ret <8 x i16> [[TMP2]]
uint16x8_t test_vceqzq_f16(float16x8_t a) {
  return vceqzq_f16(a);
}

// CHECK-LABEL: test_vcgez_f16
// CHECK:  [[TMP1:%.*]] = fcmp oge <4 x half> %a, zeroinitializer
// CHECK:  [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK:  ret <4 x i16> [[TMP2]]
uint16x4_t test_vcgez_f16(float16x4_t a) {
  return vcgez_f16(a);
}

// CHECK-LABEL: test_vcgezq_f16
// CHECK:  [[TMP1:%.*]] = fcmp oge <8 x half> %a, zeroinitializer
// CHECK:  [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK:  ret <8 x i16> [[TMP2]]
uint16x8_t test_vcgezq_f16(float16x8_t a) {
  return vcgezq_f16(a);
}

// CHECK-LABEL: test_vcgtz_f16
// CHECK:  [[TMP1:%.*]] = fcmp ogt <4 x half> %a, zeroinitializer
// CHECK:  [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK:  ret <4 x i16> [[TMP2]]
uint16x4_t test_vcgtz_f16(float16x4_t a) {
  return vcgtz_f16(a);
}

// CHECK-LABEL: test_vcgtzq_f16
// CHECK:  [[TMP1:%.*]] = fcmp ogt <8 x half> %a, zeroinitializer
// CHECK:  [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK:  ret <8 x i16> [[TMP2]]
uint16x8_t test_vcgtzq_f16(float16x8_t a) {
  return vcgtzq_f16(a);
}

// CHECK-LABEL: test_vclez_f16
// CHECK:  [[TMP1:%.*]] = fcmp ole <4 x half> %a, zeroinitializer
// CHECK:  [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK:  ret <4 x i16> [[TMP2]]
uint16x4_t test_vclez_f16(float16x4_t a) {
  return vclez_f16(a);
}

// CHECK-LABEL: test_vclezq_f16
// CHECK:  [[TMP1:%.*]] = fcmp ole <8 x half> %a, zeroinitializer
// CHECK:  [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK:  ret <8 x i16> [[TMP2]]
uint16x8_t test_vclezq_f16(float16x8_t a) {
  return vclezq_f16(a);
}

// CHECK-LABEL: test_vcltz_f16
// CHECK:  [[TMP1:%.*]] = fcmp olt <4 x half> %a, zeroinitializer
// CHECK:  [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK:  ret <4 x i16> [[TMP2]]
uint16x4_t test_vcltz_f16(float16x4_t a) {
  return vcltz_f16(a);
}

// CHECK-LABEL: test_vcltzq_f16
// CHECK:  [[TMP1:%.*]] = fcmp olt <8 x half> %a, zeroinitializer
// CHECK:  [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK:  ret <8 x i16> [[TMP2]]
uint16x8_t test_vcltzq_f16(float16x8_t a) {
  return vcltzq_f16(a);
}

// CHECK-LABEL: test_vcvt_f16_s16
// CHECK:  [[VCVT:%.*]] = sitofp <4 x i16> %a to <4 x half>
// CHECK:  ret <4 x half> [[VCVT]]
float16x4_t test_vcvt_f16_s16 (int16x4_t a) {
  return vcvt_f16_s16(a);
}

// CHECK-LABEL: test_vcvtq_f16_s16
// CHECK:  [[VCVT:%.*]] = sitofp <8 x i16> %a to <8 x half>
// CHECK:  ret <8 x half> [[VCVT]]
float16x8_t test_vcvtq_f16_s16 (int16x8_t a) {
  return vcvtq_f16_s16(a);
}

// CHECK-LABEL: test_vcvt_f16_u16
// CHECK:  [[VCVT:%.*]] = uitofp <4 x i16> %a to <4 x half>
// CHECK:  ret <4 x half> [[VCVT]]
float16x4_t test_vcvt_f16_u16 (uint16x4_t a) {
  return vcvt_f16_u16(a);
}

// CHECK-LABEL: test_vcvtq_f16_u16
// CHECK:  [[VCVT:%.*]] = uitofp <8 x i16> %a to <8 x half>
// CHECK:  ret <8 x half> [[VCVT]]
float16x8_t test_vcvtq_f16_u16 (uint16x8_t a) {
  return vcvtq_f16_u16(a);
}

// CHECK-LABEL: test_vcvt_s16_f16
// CHECK:  [[VCVT:%.*]] = fptosi <4 x half> %a to <4 x i16>
// CHECK:  ret <4 x i16> [[VCVT]]
int16x4_t test_vcvt_s16_f16 (float16x4_t a) {
  return vcvt_s16_f16(a);
}

// CHECK-LABEL: test_vcvtq_s16_f16
// CHECK:  [[VCVT:%.*]] = fptosi <8 x half> %a to <8 x i16>
// CHECK:  ret <8 x i16> [[VCVT]]
int16x8_t test_vcvtq_s16_f16 (float16x8_t a) {
  return vcvtq_s16_f16(a);
}

// CHECK-LABEL: test_vcvt_u16_f16
// CHECK:  [[VCVT:%.*]] = fptoui <4 x half> %a to <4 x i16>
// CHECK:  ret <4 x i16> [[VCVT]]
uint16x4_t test_vcvt_u16_f16 (float16x4_t a) {
  return vcvt_u16_f16(a);
}

// CHECK-LABEL: test_vcvtq_u16_f16
// CHECK:  [[VCVT:%.*]] = fptoui <8 x half> %a to <8 x i16>
// CHECK:  ret <8 x i16> [[VCVT]]
uint16x8_t test_vcvtq_u16_f16 (float16x8_t a) {
  return vcvtq_u16_f16(a);
}

// CHECK-LABEL: test_vcvta_s16_f16
// CHECK:  [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtas.v4i16.v4f16(<4 x half> %a)
// CHECK:  ret <4 x i16> [[VCVT]]
int16x4_t test_vcvta_s16_f16 (float16x4_t a) {
  return vcvta_s16_f16(a);
}

// CHECK-LABEL: test_vcvta_u16_f16
// CHECK:  [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtau.v4i16.v4f16(<4 x half> %a)
// CHECK:  ret <4 x i16> [[VCVT]]
uint16x4_t test_vcvta_u16_f16 (float16x4_t a) {
  return vcvta_u16_f16(a);
}

// CHECK-LABEL: test_vcvtaq_s16_f16
// CHECK:  [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtas.v8i16.v8f16(<8 x half> %a)
// CHECK:  ret <8 x i16> [[VCVT]]
int16x8_t test_vcvtaq_s16_f16 (float16x8_t a) {
  return vcvtaq_s16_f16(a);
}

// CHECK-LABEL: test_vcvtm_s16_f16
// CHECK:  [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtms.v4i16.v4f16(<4 x half> %a)
// CHECK:  ret <4 x i16> [[VCVT]]
int16x4_t test_vcvtm_s16_f16 (float16x4_t a) {
  return vcvtm_s16_f16(a);
}

// CHECK-LABEL: test_vcvtmq_s16_f16
// CHECK:  [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtms.v8i16.v8f16(<8 x half> %a)
// CHECK:  ret <8 x i16> [[VCVT]]
int16x8_t test_vcvtmq_s16_f16 (float16x8_t a) {
  return vcvtmq_s16_f16(a);
}

// CHECK-LABEL: test_vcvtm_u16_f16
// CHECK:  [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtmu.v4i16.v4f16(<4 x half> %a)
// CHECK:  ret <4 x i16> [[VCVT]]
uint16x4_t test_vcvtm_u16_f16 (float16x4_t a) {
  return vcvtm_u16_f16(a);
}

// CHECK-LABEL: test_vcvtmq_u16_f16
// CHECK:  [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtmu.v8i16.v8f16(<8 x half> %a)
// CHECK:  ret <8 x i16> [[VCVT]]
uint16x8_t test_vcvtmq_u16_f16 (float16x8_t a) {
  return vcvtmq_u16_f16(a);
}

// CHECK-LABEL: test_vcvtn_s16_f16
// CHECK:  [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtns.v4i16.v4f16(<4 x half> %a)
// CHECK:  ret <4 x i16> [[VCVT]]
int16x4_t test_vcvtn_s16_f16 (float16x4_t a) {
  return vcvtn_s16_f16(a);
}

// CHECK-LABEL: test_vcvtnq_s16_f16
// CHECK:  [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtns.v8i16.v8f16(<8 x half> %a)
// CHECK:  ret <8 x i16> [[VCVT]]
int16x8_t test_vcvtnq_s16_f16 (float16x8_t a) {
  return vcvtnq_s16_f16(a);
}

// CHECK-LABEL: test_vcvtn_u16_f16
// CHECK:  [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtnu.v4i16.v4f16(<4 x half> %a)
// CHECK:  ret <4 x i16> [[VCVT]]
uint16x4_t test_vcvtn_u16_f16 (float16x4_t a) {
  return vcvtn_u16_f16(a);
}

// CHECK-LABEL: test_vcvtnq_u16_f16
// CHECK:  [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtnu.v8i16.v8f16(<8 x half> %a)
// CHECK:  ret <8 x i16> [[VCVT]]
uint16x8_t test_vcvtnq_u16_f16 (float16x8_t a) {
  return vcvtnq_u16_f16(a);
}

// CHECK-LABEL: test_vcvtp_s16_f16
// CHECK:  [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtps.v4i16.v4f16(<4 x half> %a)
// CHECK:  ret <4 x i16> [[VCVT]]
int16x4_t test_vcvtp_s16_f16 (float16x4_t a) {
  return vcvtp_s16_f16(a);
}

// CHECK-LABEL: test_vcvtpq_s16_f16
// CHECK:  [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtps.v8i16.v8f16(<8 x half> %a)
// CHECK:  ret <8 x i16> [[VCVT]]
int16x8_t test_vcvtpq_s16_f16 (float16x8_t a) {
  return vcvtpq_s16_f16(a);
}

// CHECK-LABEL: test_vcvtp_u16_f16
// CHECK:  [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtpu.v4i16.v4f16(<4 x half> %a)
// CHECK:  ret <4 x i16> [[VCVT]]
uint16x4_t test_vcvtp_u16_f16 (float16x4_t a) {
  return vcvtp_u16_f16(a);
}

// CHECK-LABEL: test_vcvtpq_u16_f16
// CHECK:  [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtpu.v8i16.v8f16(<8 x half> %a)
// CHECK:  ret <8 x i16> [[VCVT]]
uint16x8_t test_vcvtpq_u16_f16 (float16x8_t a) {
  return vcvtpq_u16_f16(a);
}

// FIXME: Fix the zero constant when fp16 non-storage-only type becomes available.
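// (0xH8000 is the IEEE-754 binary16 encoding of -0.0, so negation is
// currently emitted as a subtraction from a -0.0 splat.)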
// CHECK-LABEL: test_vneg_f16
// CHECK:  [[NEG:%.*]] = fsub <4 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %a
// CHECK:  ret <4 x half> [[NEG]]
float16x4_t test_vneg_f16(float16x4_t a) {
  return vneg_f16(a);
}

// CHECK-LABEL: test_vnegq_f16
// CHECK:  [[NEG:%.*]] = fsub <8 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %a
// CHECK:  ret <8 x half> [[NEG]]
float16x8_t test_vnegq_f16(float16x8_t a) {
  return vnegq_f16(a);
}

// CHECK-LABEL: test_vrecpe_f16
// CHECK:  [[RCP:%.*]] = call <4 x half> @llvm.aarch64.neon.frecpe.v4f16(<4 x half> %a)
// CHECK:  ret <4 x half> [[RCP]]
float16x4_t test_vrecpe_f16(float16x4_t a) {
  return vrecpe_f16(a);
}

// CHECK-LABEL: test_vrecpeq_f16
// CHECK:  [[RCP:%.*]] = call <8 x half> @llvm.aarch64.neon.frecpe.v8f16(<8 x half> %a)
// CHECK:  ret <8 x half> [[RCP]]
float16x8_t test_vrecpeq_f16(float16x8_t a) {
  return vrecpeq_f16(a);
}

// CHECK-LABEL: test_vrnd_f16
// CHECK:  [[RND:%.*]] =  call <4 x half> @llvm.trunc.v4f16(<4 x half> %a)
// CHECK:  ret <4 x half> [[RND]]
float16x4_t test_vrnd_f16(float16x4_t a) {
  return vrnd_f16(a);
}

// CHECK-LABEL: test_vrndq_f16
// CHECK:  [[RND:%.*]] =  call <8 x half> @llvm.trunc.v8f16(<8 x half> %a)
// CHECK:  ret <8 x half> [[RND]]
float16x8_t test_vrndq_f16(float16x8_t a) {
  return vrndq_f16(a);
}

// CHECK-LABEL: test_vrnda_f16
// CHECK:  [[RND:%.*]] =  call <4 x half> @llvm.round.v4f16(<4 x half> %a)
// CHECK:  ret <4 x half> [[RND]]
float16x4_t test_vrnda_f16(float16x4_t a) {
  return vrnda_f16(a);
}

// CHECK-LABEL: test_vrndaq_f16
// CHECK:  [[RND:%.*]] =  call <8 x half> @llvm.round.v8f16(<8 x half> %a)
// CHECK:  ret <8 x half> [[RND]]
float16x8_t test_vrndaq_f16(float16x8_t a) {
  return vrndaq_f16(a);
}

// CHECK-LABEL: test_vrndi_f16
// CHECK:  [[RND:%.*]] =  call <4 x half> @llvm.nearbyint.v4f16(<4 x half> %a)
// CHECK:  ret <4 x half> [[RND]]
float16x4_t test_vrndi_f16(float16x4_t a) {
  return vrndi_f16(a);
}

// CHECK-LABEL: test_vrndiq_f16
// CHECK:  [[RND:%.*]] =  call <8 x half> @llvm.nearbyint.v8f16(<8 x half> %a)
// CHECK:  ret <8 x half> [[RND]]
float16x8_t test_vrndiq_f16(float16x8_t a) {
  return vrndiq_f16(a);
}

// CHECK-LABEL: test_vrndm_f16
// CHECK:  [[RND:%.*]] =  call <4 x half> @llvm.floor.v4f16(<4 x half> %a)
// CHECK:  ret <4 x half> [[RND]]
float16x4_t test_vrndm_f16(float16x4_t a) {
  return vrndm_f16(a);
}

// CHECK-LABEL: test_vrndmq_f16
// CHECK:  [[RND:%.*]] =  call <8 x half> @llvm.floor.v8f16(<8 x half> %a)
// CHECK:  ret <8 x half> [[RND]]
float16x8_t test_vrndmq_f16(float16x8_t a) {
  return vrndmq_f16(a);
}

// CHECK-LABEL: test_vrndn_f16
// CHECK:  [[RND:%.*]] =  call <4 x half> @llvm.aarch64.neon.frintn.v4f16(<4 x half> %a)
// CHECK:  ret <4 x half> [[RND]]
float16x4_t test_vrndn_f16(float16x4_t a) {
  return vrndn_f16(a);
}

// CHECK-LABEL: test_vrndnq_f16
// CHECK:  [[RND:%.*]] =  call <8 x half> @llvm.aarch64.neon.frintn.v8f16(<8 x half> %a)
// CHECK:  ret <8 x half> [[RND]]
float16x8_t test_vrndnq_f16(float16x8_t a) {
  return vrndnq_f16(a);
}

// CHECK-LABEL: test_vrndp_f16
// CHECK:  [[RND:%.*]] =  call <4 x half> @llvm.ceil.v4f16(<4 x half> %a)
// CHECK:  ret <4 x half> [[RND]]
float16x4_t test_vrndp_f16(float16x4_t a) {
  return vrndp_f16(a);
}

// CHECK-LABEL: test_vrndpq_f16
// CHECK:  [[RND:%.*]] =  call <8 x half> @llvm.ceil.v8f16(<8 x half> %a)
// CHECK:  ret <8 x half> [[RND]]
float16x8_t test_vrndpq_f16(float16x8_t a) {
  return vrndpq_f16(a);
}

// CHECK-LABEL: test_vrndx_f16
// CHECK:  [[RND:%.*]] =  call <4 x half> @llvm.rint.v4f16(<4 x half> %a)
// CHECK:  ret <4 x half> [[RND]]
float16x4_t test_vrndx_f16(float16x4_t a) {
  return vrndx_f16(a);
}

// CHECK-LABEL: test_vrndxq_f16
// CHECK:  [[RND:%.*]] =  call <8 x half> @llvm.rint.v8f16(<8 x half> %a)
// CHECK:  ret <8 x half> [[RND]]
float16x8_t test_vrndxq_f16(float16x8_t a) {
  return vrndxq_f16(a);
}

// CHECK-LABEL: test_vrsqrte_f16
// CHECK:  [[RSQRT:%.*]] = call <4 x half> @llvm.aarch64.neon.frsqrte.v4f16(<4 x half> %a)
// CHECK:  ret <4 x half> [[RSQRT]]
float16x4_t test_vrsqrte_f16(float16x4_t a) {
  return vrsqrte_f16(a);
}

// CHECK-LABEL: test_vrsqrteq_f16
// CHECK:  [[RSQRT:%.*]] = call <8 x half> @llvm.aarch64.neon.frsqrte.v8f16(<8 x half> %a)
// CHECK:  ret <8 x half> [[RSQRT]]
float16x8_t test_vrsqrteq_f16(float16x8_t a) {
  return vrsqrteq_f16(a);
}

// CHECK-LABEL: test_vsqrt_f16
// CHECK:  [[SQR:%.*]] = call <4 x half> @llvm.sqrt.v4f16(<4 x half> %a)
// CHECK:  ret <4 x half> [[SQR]]
float16x4_t test_vsqrt_f16(float16x4_t a) {
  return vsqrt_f16(a);
}

// CHECK-LABEL: test_vsqrtq_f16
// CHECK:  [[SQR:%.*]] = call <8 x half> @llvm.sqrt.v8f16(<8 x half> %a)
// CHECK:  ret <8 x half> [[SQR]]
float16x8_t test_vsqrtq_f16(float16x8_t a) {
  return vsqrtq_f16(a);
}

// CHECK-LABEL: test_vadd_f16
// CHECK:  [[ADD:%.*]] = fadd <4 x half> %a, %b
// CHECK:  ret <4 x half> [[ADD]]
float16x4_t test_vadd_f16(float16x4_t a, float16x4_t b) {
  return vadd_f16(a, b);
}

// CHECK-LABEL: test_vaddq_f16
// CHECK:  [[ADD:%.*]] = fadd <8 x half> %a, %b
// CHECK:  ret <8 x half> [[ADD]]
float16x8_t test_vaddq_f16(float16x8_t a, float16x8_t b) {
  return vaddq_f16(a, b);
}

// CHECK-LABEL: test_vabd_f16
// CHECK:  [[ABD:%.*]] = call <4 x half> @llvm.aarch64.neon.fabd.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x half> [[ABD]]
float16x4_t test_vabd_f16(float16x4_t a, float16x4_t b) {
  return vabd_f16(a, b);
}

// CHECK-LABEL: test_vabdq_f16
// CHECK:  [[ABD:%.*]] = call <8 x half> @llvm.aarch64.neon.fabd.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x half> [[ABD]]
float16x8_t test_vabdq_f16(float16x8_t a, float16x8_t b) {
  return vabdq_f16(a, b);
}

// CHECK-LABEL: test_vcage_f16
// CHECK:  [[ABS:%.*]] = call <4 x i16> @llvm.aarch64.neon.facge.v4i16.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x i16> [[ABS]]
uint16x4_t test_vcage_f16(float16x4_t a, float16x4_t b) {
  return vcage_f16(a, b);
}

// CHECK-LABEL: test_vcageq_f16
// CHECK:  [[ABS:%.*]] = call <8 x i16> @llvm.aarch64.neon.facge.v8i16.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x i16> [[ABS]]
uint16x8_t test_vcageq_f16(float16x8_t a, float16x8_t b) {
  return vcageq_f16(a, b);
}

// CHECK-LABEL: test_vcagt_f16
// CHECK:  [[ABS:%.*]] = call <4 x i16> @llvm.aarch64.neon.facgt.v4i16.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x i16> [[ABS]]
uint16x4_t test_vcagt_f16(float16x4_t a, float16x4_t b) {
  return vcagt_f16(a, b);
}

// CHECK-LABEL: test_vcagtq_f16
// CHECK:  [[ABS:%.*]] = call <8 x i16> @llvm.aarch64.neon.facgt.v8i16.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x i16> [[ABS]]
uint16x8_t test_vcagtq_f16(float16x8_t a, float16x8_t b) {
  return vcagtq_f16(a, b);
}

// CHECK-LABEL: test_vcale_f16
// CHECK:  [[ABS:%.*]] = call <4 x i16> @llvm.aarch64.neon.facge.v4i16.v4f16(<4 x half> %b, <4 x half> %a)
// CHECK:  ret <4 x i16> [[ABS]]
uint16x4_t test_vcale_f16(float16x4_t a, float16x4_t b) {
  return vcale_f16(a, b);
}

// CHECK-LABEL: test_vcaleq_f16
// CHECK:  [[ABS:%.*]] = call <8 x i16> @llvm.aarch64.neon.facge.v8i16.v8f16(<8 x half> %b, <8 x half> %a)
// CHECK:  ret <8 x i16> [[ABS]]
uint16x8_t test_vcaleq_f16(float16x8_t a, float16x8_t b) {
  return vcaleq_f16(a, b);
}

// CHECK-LABEL: test_vcalt_f16
// CHECK:  [[ABS:%.*]] = call <4 x i16> @llvm.aarch64.neon.facgt.v4i16.v4f16(<4 x half> %b, <4 x half> %a)
// CHECK:  ret <4 x i16> [[ABS]]
uint16x4_t test_vcalt_f16(float16x4_t a, float16x4_t b) {
  return vcalt_f16(a, b);
}

// CHECK-LABEL: test_vcaltq_f16
// CHECK:  [[ABS:%.*]] = call <8 x i16> @llvm.aarch64.neon.facgt.v8i16.v8f16(<8 x half> %b, <8 x half> %a)
// CHECK:  ret <8 x i16> [[ABS]]
uint16x8_t test_vcaltq_f16(float16x8_t a, float16x8_t b) {
  return vcaltq_f16(a, b);
}

// CHECK-LABEL: test_vceq_f16
// CHECK:  [[TMP1:%.*]] = fcmp oeq <4 x half> %a, %b
// CHECK:  [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK:  ret <4 x i16> [[TMP2]]
uint16x4_t test_vceq_f16(float16x4_t a, float16x4_t b) {
  return vceq_f16(a, b);
}

// CHECK-LABEL: test_vceqq_f16
// CHECK:  [[TMP1:%.*]] = fcmp oeq <8 x half> %a, %b
// CHECK:  [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK:  ret <8 x i16> [[TMP2]]
uint16x8_t test_vceqq_f16(float16x8_t a, float16x8_t b) {
  return vceqq_f16(a, b);
}

// CHECK-LABEL: test_vcge_f16
// CHECK:  [[TMP1:%.*]] = fcmp oge <4 x half> %a, %b
// CHECK:  [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK:  ret <4 x i16> [[TMP2]]
uint16x4_t test_vcge_f16(float16x4_t a, float16x4_t b) {
  return vcge_f16(a, b);
}

// CHECK-LABEL: test_vcgeq_f16
// CHECK:  [[TMP1:%.*]] = fcmp oge <8 x half> %a, %b
// CHECK:  [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK:  ret <8 x i16> [[TMP2]]
uint16x8_t test_vcgeq_f16(float16x8_t a, float16x8_t b) {
  return vcgeq_f16(a, b);
}

// CHECK-LABEL: test_vcgt_f16
// CHECK:  [[TMP1:%.*]] = fcmp ogt <4 x half> %a, %b
// CHECK:  [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK:  ret <4 x i16> [[TMP2]]
uint16x4_t test_vcgt_f16(float16x4_t a, float16x4_t b) {
  return vcgt_f16(a, b);
}

// CHECK-LABEL: test_vcgtq_f16
// CHECK:  [[TMP1:%.*]] = fcmp ogt <8 x half> %a, %b
// CHECK:  [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK:  ret <8 x i16> [[TMP2]]
uint16x8_t test_vcgtq_f16(float16x8_t a, float16x8_t b) {
  return vcgtq_f16(a, b);
}

// CHECK-LABEL: test_vcle_f16
// CHECK:  [[TMP1:%.*]] = fcmp ole <4 x half> %a, %b
// CHECK:  [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK:  ret <4 x i16> [[TMP2]]
uint16x4_t test_vcle_f16(float16x4_t a, float16x4_t b) {
  return vcle_f16(a, b);
}

// CHECK-LABEL: test_vcleq_f16
// CHECK:  [[TMP1:%.*]] = fcmp ole <8 x half> %a, %b
// CHECK:  [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK:  ret <8 x i16> [[TMP2]]
uint16x8_t test_vcleq_f16(float16x8_t a, float16x8_t b) {
  return vcleq_f16(a, b);
}

// CHECK-LABEL: test_vclt_f16
// CHECK:  [[TMP1:%.*]] = fcmp olt <4 x half> %a, %b
// CHECK:  [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK:  ret <4 x i16> [[TMP2]]
uint16x4_t test_vclt_f16(float16x4_t a, float16x4_t b) {
  return vclt_f16(a, b);
}

// CHECK-LABEL: test_vcltq_f16
// CHECK:  [[TMP1:%.*]] = fcmp olt <8 x half> %a, %b
// CHECK:  [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK:  ret <8 x i16> [[TMP2]]
uint16x8_t test_vcltq_f16(float16x8_t a, float16x8_t b) {
  return vcltq_f16(a, b);
}

// CHECK-LABEL: test_vcvt_n_f16_s16
// CHECK:  [[CVT:%.*]] = call <4 x half> @llvm.aarch64.neon.vcvtfxs2fp.v4f16.v4i16(<4 x i16> %vcvt_n, i32 2)
// CHECK:  ret <4 x half> [[CVT]]
float16x4_t test_vcvt_n_f16_s16(int16x4_t a) {
  return vcvt_n_f16_s16(a, 2);
}
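
// The immediate operand of the vcvt_n_* intrinsics is the number of
// fraction bits; for 16-bit element types it must be a constant in the
// range 1 to 16 (2 is used throughout these tests).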

// CHECK-LABEL: test_vcvtq_n_f16_s16
// CHECK:  [[CVT:%.*]] = call <8 x half> @llvm.aarch64.neon.vcvtfxs2fp.v8f16.v8i16(<8 x i16> %vcvt_n, i32 2)
// CHECK:  ret <8 x half> [[CVT]]
float16x8_t test_vcvtq_n_f16_s16(int16x8_t a) {
  return vcvtq_n_f16_s16(a, 2);
}

// CHECK-LABEL: test_vcvt_n_f16_u16
// CHECK:  [[CVT:%.*]] = call <4 x half> @llvm.aarch64.neon.vcvtfxu2fp.v4f16.v4i16(<4 x i16> %vcvt_n, i32 2)
// CHECK:  ret <4 x half> [[CVT]]
float16x4_t test_vcvt_n_f16_u16(uint16x4_t a) {
  return vcvt_n_f16_u16(a, 2);
}

// CHECK-LABEL: test_vcvtq_n_f16_u16
// CHECK:  [[CVT:%.*]] = call <8 x half> @llvm.aarch64.neon.vcvtfxu2fp.v8f16.v8i16(<8 x i16> %vcvt_n, i32 2)
// CHECK:  ret <8 x half> [[CVT]]
float16x8_t test_vcvtq_n_f16_u16(uint16x8_t a) {
  return vcvtq_n_f16_u16(a, 2);
}

// CHECK-LABEL: test_vcvt_n_s16_f16
// CHECK:  [[CVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.vcvtfp2fxs.v4i16.v4f16(<4 x half> %vcvt_n, i32 2)
// CHECK:  ret <4 x i16> [[CVT]]
int16x4_t test_vcvt_n_s16_f16(float16x4_t a) {
  return vcvt_n_s16_f16(a, 2);
}

// CHECK-LABEL: test_vcvtq_n_s16_f16
// CHECK:  [[CVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.vcvtfp2fxs.v8i16.v8f16(<8 x half> %vcvt_n, i32 2)
// CHECK:  ret <8 x i16> [[CVT]]
int16x8_t test_vcvtq_n_s16_f16(float16x8_t a) {
  return vcvtq_n_s16_f16(a, 2);
}

// CHECK-LABEL: test_vcvt_n_u16_f16
// CHECK:  [[CVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.vcvtfp2fxu.v4i16.v4f16(<4 x half> %vcvt_n, i32 2)
// CHECK:  ret <4 x i16> [[CVT]]
uint16x4_t test_vcvt_n_u16_f16(float16x4_t a) {
  return vcvt_n_u16_f16(a, 2);
}

// CHECK-LABEL: test_vcvtq_n_u16_f16
// CHECK:  [[CVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.vcvtfp2fxu.v8i16.v8f16(<8 x half> %vcvt_n, i32 2)
// CHECK:  ret <8 x i16> [[CVT]]
uint16x8_t test_vcvtq_n_u16_f16(float16x8_t a) {
  return vcvtq_n_u16_f16(a, 2);
}

// CHECK-LABEL: test_vdiv_f16
// CHECK:  [[DIV:%.*]] = fdiv <4 x half> %a, %b
// CHECK:  ret <4 x half> [[DIV]]
float16x4_t test_vdiv_f16(float16x4_t a, float16x4_t b) {
  return vdiv_f16(a, b);
}

// CHECK-LABEL: test_vdivq_f16
// CHECK:  [[DIV:%.*]] = fdiv <8 x half> %a, %b
// CHECK:  ret <8 x half> [[DIV]]
float16x8_t test_vdivq_f16(float16x8_t a, float16x8_t b) {
  return vdivq_f16(a, b);
}

// CHECK-LABEL: test_vmax_f16
// CHECK:  [[MAX:%.*]] = call <4 x half> @llvm.aarch64.neon.fmax.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x half> [[MAX]]
float16x4_t test_vmax_f16(float16x4_t a, float16x4_t b) {
  return vmax_f16(a, b);
}

// CHECK-LABEL: test_vmaxq_f16
// CHECK:  [[MAX:%.*]] = call <8 x half> @llvm.aarch64.neon.fmax.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x half> [[MAX]]
float16x8_t test_vmaxq_f16(float16x8_t a, float16x8_t b) {
  return vmaxq_f16(a, b);
}

// CHECK-LABEL: test_vmaxnm_f16
// CHECK:  [[MAX:%.*]] = call <4 x half> @llvm.aarch64.neon.fmaxnm.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x half> [[MAX]]
float16x4_t test_vmaxnm_f16(float16x4_t a, float16x4_t b) {
  return vmaxnm_f16(a, b);
}

// CHECK-LABEL: test_vmaxnmq_f16
// CHECK:  [[MAX:%.*]] = call <8 x half> @llvm.aarch64.neon.fmaxnm.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x half> [[MAX]]
float16x8_t test_vmaxnmq_f16(float16x8_t a, float16x8_t b) {
  return vmaxnmq_f16(a, b);
}

// CHECK-LABEL: test_vmin_f16
// CHECK:  [[MIN:%.*]] = call <4 x half> @llvm.aarch64.neon.fmin.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x half> [[MIN]]
float16x4_t test_vmin_f16(float16x4_t a, float16x4_t b) {
  return vmin_f16(a, b);
}

// CHECK-LABEL: test_vminq_f16
// CHECK:  [[MIN:%.*]] = call <8 x half> @llvm.aarch64.neon.fmin.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x half> [[MIN]]
float16x8_t test_vminq_f16(float16x8_t a, float16x8_t b) {
  return vminq_f16(a, b);
}

// CHECK-LABEL: test_vminnm_f16
// CHECK:  [[MIN:%.*]] = call <4 x half> @llvm.aarch64.neon.fminnm.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x half> [[MIN]]
float16x4_t test_vminnm_f16(float16x4_t a, float16x4_t b) {
  return vminnm_f16(a, b);
}

// CHECK-LABEL: test_vminnmq_f16
// CHECK:  [[MIN:%.*]] = call <8 x half> @llvm.aarch64.neon.fminnm.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x half> [[MIN]]
float16x8_t test_vminnmq_f16(float16x8_t a, float16x8_t b) {
  return vminnmq_f16(a, b);
}

// CHECK-LABEL: test_vmul_f16
// CHECK:  [[MUL:%.*]] = fmul <4 x half> %a, %b
// CHECK:  ret <4 x half> [[MUL]]
float16x4_t test_vmul_f16(float16x4_t a, float16x4_t b) {
  return vmul_f16(a, b);
}

// CHECK-LABEL: test_vmulq_f16
// CHECK:  [[MUL:%.*]] = fmul <8 x half> %a, %b
// CHECK:  ret <8 x half> [[MUL]]
float16x8_t test_vmulq_f16(float16x8_t a, float16x8_t b) {
  return vmulq_f16(a, b);
}

// CHECK-LABEL: test_vmulx_f16
// CHECK:  [[MUL:%.*]] = call <4 x half> @llvm.aarch64.neon.fmulx.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x half> [[MUL]]
float16x4_t test_vmulx_f16(float16x4_t a, float16x4_t b) {
  return vmulx_f16(a, b);
}

// CHECK-LABEL: test_vmulxq_f16
// CHECK:  [[MUL:%.*]] = call <8 x half> @llvm.aarch64.neon.fmulx.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x half> [[MUL]]
float16x8_t test_vmulxq_f16(float16x8_t a, float16x8_t b) {
  return vmulxq_f16(a, b);
}

// CHECK-LABEL: test_vpadd_f16
// CHECK:  [[ADD:%.*]] = call <4 x half> @llvm.aarch64.neon.addp.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x half> [[ADD]]
float16x4_t test_vpadd_f16(float16x4_t a, float16x4_t b) {
  return vpadd_f16(a, b);
}

// CHECK-LABEL: test_vpaddq_f16
// CHECK:  [[ADD:%.*]] = call <8 x half> @llvm.aarch64.neon.addp.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x half> [[ADD]]
float16x8_t test_vpaddq_f16(float16x8_t a, float16x8_t b) {
  return vpaddq_f16(a, b);
}

// CHECK-LABEL: test_vpmax_f16
// CHECK:  [[MAX:%.*]] = call <4 x half> @llvm.aarch64.neon.fmaxp.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x half> [[MAX]]
float16x4_t test_vpmax_f16(float16x4_t a, float16x4_t b) {
  return vpmax_f16(a, b);
}

// CHECK-LABEL: test_vpmaxq_f16
// CHECK:  [[MAX:%.*]] = call <8 x half> @llvm.aarch64.neon.fmaxp.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x half> [[MAX]]
float16x8_t test_vpmaxq_f16(float16x8_t a, float16x8_t b) {
  return vpmaxq_f16(a, b);
}

// CHECK-LABEL: test_vpmaxnm_f16
// CHECK:  [[MAX:%.*]] = call <4 x half> @llvm.aarch64.neon.fmaxnmp.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x half> [[MAX]]
float16x4_t test_vpmaxnm_f16(float16x4_t a, float16x4_t b) {
  return vpmaxnm_f16(a, b);
}

// CHECK-LABEL: test_vpmaxnmq_f16
// CHECK:  [[MAX:%.*]] = call <8 x half> @llvm.aarch64.neon.fmaxnmp.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x half> [[MAX]]
float16x8_t test_vpmaxnmq_f16(float16x8_t a, float16x8_t b) {
  return vpmaxnmq_f16(a, b);
}

// CHECK-LABEL: test_vpmin_f16
// CHECK:  [[MIN:%.*]] = call <4 x half> @llvm.aarch64.neon.fminp.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x half> [[MIN]]
float16x4_t test_vpmin_f16(float16x4_t a, float16x4_t b) {
  return vpmin_f16(a, b);
}

// CHECK-LABEL: test_vpminq_f16
// CHECK:  [[MIN:%.*]] = call <8 x half> @llvm.aarch64.neon.fminp.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x half> [[MIN]]
float16x8_t test_vpminq_f16(float16x8_t a, float16x8_t b) {
  return vpminq_f16(a, b);
}

// CHECK-LABEL: test_vpminnm_f16
// CHECK:  [[MIN:%.*]] = call <4 x half> @llvm.aarch64.neon.fminnmp.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x half> [[MIN]]
float16x4_t test_vpminnm_f16(float16x4_t a, float16x4_t b) {
  return vpminnm_f16(a, b);
}

// CHECK-LABEL: test_vpminnmq_f16
// CHECK:  [[MIN:%.*]] = call <8 x half> @llvm.aarch64.neon.fminnmp.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x half> [[MIN]]
float16x8_t test_vpminnmq_f16(float16x8_t a, float16x8_t b) {
  return vpminnmq_f16(a, b);
}

// CHECK-LABEL: test_vrecps_f16
// CHECK:  [[RECPS:%.*]] = call <4 x half> @llvm.aarch64.neon.frecps.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x half> [[RECPS]]
float16x4_t test_vrecps_f16(float16x4_t a, float16x4_t b) {
  return vrecps_f16(a, b);
}

// CHECK-LABEL: test_vrecpsq_f16
// CHECK:  [[RECPS:%.*]] =  call <8 x half> @llvm.aarch64.neon.frecps.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x half> [[RECPS]]
float16x8_t test_vrecpsq_f16(float16x8_t a, float16x8_t b) {
  return vrecpsq_f16(a, b);
}

// CHECK-LABEL: test_vrsqrts_f16
// CHECK:  [[RSQRTS:%.*]] = call <4 x half> @llvm.aarch64.neon.frsqrts.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK:  ret <4 x half> [[RSQRTS]]
float16x4_t test_vrsqrts_f16(float16x4_t a, float16x4_t b) {
  return vrsqrts_f16(a, b);
}

// CHECK-LABEL: test_vrsqrtsq_f16
// CHECK:  [[RSQRTS:%.*]] =  call <8 x half> @llvm.aarch64.neon.frsqrts.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK:  ret <8 x half> [[RSQRTS]]
float16x8_t test_vrsqrtsq_f16(float16x8_t a, float16x8_t b) {
  return vrsqrtsq_f16(a, b);
}

// CHECK-LABEL: test_vsub_f16
// CHECK:  [[SUB:%.*]] = fsub <4 x half> %a, %b
// CHECK:  ret <4 x half> [[SUB]]
float16x4_t test_vsub_f16(float16x4_t a, float16x4_t b) {
  return vsub_f16(a, b);
}

// CHECK-LABEL: test_vsubq_f16
// CHECK:  [[SUB:%.*]] = fsub <8 x half> %a, %b
// CHECK:  ret <8 x half> [[SUB]]
float16x8_t test_vsubq_f16(float16x8_t a, float16x8_t b) {
  return vsubq_f16(a, b);
}

// CHECK-LABEL: test_vfma_f16
// CHECK:  [[FMA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %a)
// CHECK:  ret <4 x half> [[FMA]]
float16x4_t test_vfma_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
  return vfma_f16(a, b, c);
}

// CHECK-LABEL: test_vfmaq_f16
// CHECK:  [[FMA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %a)
// CHECK:  ret <8 x half> [[FMA]]
float16x8_t test_vfmaq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
  return vfmaq_f16(a, b, c);
}
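
// Note the operand order: vfma_f16(a, b, c) computes a + b * c, so the
// accumulator %a becomes the last operand of @llvm.fma.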

// CHECK-LABEL: test_vfms_f16
// CHECK:  [[SUB:%.*]] = fsub <4 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %b
// CHECK:  [[FMA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[SUB]], <4 x half> %c, <4 x half> %a)
// CHECK:  ret <4 x half> [[FMA]]
float16x4_t test_vfms_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
  return vfms_f16(a, b, c);
}

// CHECK-LABEL: test_vfmsq_f16
// CHECK:  [[SUB:%.*]] = fsub <8 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %b
// CHECK:  [[FMA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[SUB]], <8 x half> %c, <8 x half> %a)
// CHECK:  ret <8 x half> [[FMA]]
float16x8_t test_vfmsq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
  return vfmsq_f16(a, b, c);
}
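
// vfms is emitted as @llvm.fma with the first multiplicand negated via an
// fsub from a -0.0 splat, as the checks above show.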

// CHECK-LABEL: test_vfma_lane_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x half> %c to <8 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x half>
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <4 x i32> <i32 3, i32 3, i32 3, i32 3>
// CHECK: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
// CHECK: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]])
// CHECK: ret <4 x half> [[FMLA]]
float16x4_t test_vfma_lane_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
  return vfma_lane_f16(a, b, c, 3);
}

// CHECK-LABEL: test_vfmaq_lane_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x half> %c to <8 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x half>
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
// CHECK: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
// CHECK: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]])
// CHECK: ret <8 x half> [[FMLA]]
float16x8_t test_vfmaq_lane_f16(float16x8_t a, float16x8_t b, float16x4_t c) {
  return vfmaq_lane_f16(a, b, c, 3);
}

// CHECK-LABEL: test_vfma_laneq_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x half> %c to <16 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
// CHECK: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <4 x i32> <i32 7, i32 7, i32 7, i32 7>
// CHECK: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]])
// CHECK: ret <4 x half> [[FMLA]]
float16x4_t test_vfma_laneq_f16(float16x4_t a, float16x4_t b, float16x8_t c) {
  return vfma_laneq_f16(a, b, c, 7);
}

// CHECK-LABEL: test_vfmaq_laneq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x half> %c to <16 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
// CHECK: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
// CHECK: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]])
// CHECK: ret <8 x half> [[FMLA]]
float16x8_t test_vfmaq_laneq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
  return vfmaq_laneq_f16(a, b, c, 7);
}

// CHECK-LABEL: test_vfma_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <4 x half> undef, half %c, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half %c, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %c, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %c, i32 3
// CHECK: [[FMA:%.*]]  = call <4 x half> @llvm.fma.v4f16(<4 x half> %b, <4 x half> [[TMP3]], <4 x half> %a)
// CHECK: ret <4 x half> [[FMA]]
float16x4_t test_vfma_n_f16(float16x4_t a, float16x4_t b, float16_t c) {
  return vfma_n_f16(a, b, c);
}
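
// The _n forms splat the scalar operand through a chain of insertelement
// instructions before the fused multiply-add.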

// CHECK-LABEL: test_vfmaq_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <8 x half> undef, half %c, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half %c, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half %c, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half %c, i32 3
// CHECK: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half %c, i32 4
// CHECK: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half %c, i32 5
// CHECK: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %c, i32 6
// CHECK: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %c, i32 7
// CHECK: [[FMA:%.*]]  = call <8 x half> @llvm.fma.v8f16(<8 x half> %b, <8 x half> [[TMP7]], <8 x half> %a)
// CHECK: ret <8 x half> [[FMA]]
float16x8_t test_vfmaq_n_f16(float16x8_t a, float16x8_t b, float16_t c) {
  return vfmaq_n_f16(a, b, c);
}

// CHECK-LABEL: test_vfmah_lane_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %c to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[EXTR:%.*]] = extractelement <4 x half> [[TMP1]], i32 3
// CHECK: [[FMA:%.*]]  = call half @llvm.fma.f16(half %b, half [[EXTR]], half %a)
// CHECK: ret half [[FMA]]
float16_t test_vfmah_lane_f16(float16_t a, float16_t b, float16x4_t c) {
  return vfmah_lane_f16(a, b, c, 3);
}

// CHECK-LABEL: test_vfmah_laneq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %c to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[EXTR:%.*]] = extractelement <8 x half> [[TMP1]], i32 7
// CHECK: [[FMA:%.*]]  = call half @llvm.fma.f16(half %b, half [[EXTR]], half %a)
// CHECK: ret half [[FMA]]
float16_t test_vfmah_laneq_f16(float16_t a, float16_t b, float16x8_t c) {
  return vfmah_laneq_f16(a, b, c, 7);
}

// CHECK-LABEL: test_vfms_lane_f16
// CHECK: [[SUB:%.*]]  = fsub <4 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %b
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> [[SUB]] to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x half> %c to <8 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x half>
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <4 x i32> <i32 3, i32 3, i32 3, i32 3>
// CHECK: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
// CHECK: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[FMA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]])
// CHECK: ret <4 x half> [[FMA]]
float16x4_t test_vfms_lane_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
  return vfms_lane_f16(a, b, c, 3);
}

// CHECK-LABEL: test_vfmsq_lane_f16
// CHECK: [[SUB:%.*]]  = fsub <8 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %b
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> [[SUB]] to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x half> %c to <8 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x half>
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
// CHECK: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
// CHECK: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]])
// CHECK: ret <8 x half> [[FMLA]]
float16x8_t test_vfmsq_lane_f16(float16x8_t a, float16x8_t b, float16x4_t c) {
  return vfmsq_lane_f16(a, b, c, 3);
}

// CHECK-LABEL: test_vfms_laneq_f16
// CHECK: [[SUB:%.*]]  = fsub <4 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %b
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> [[SUB]] to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x half> %c to <16 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
// CHECK: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <4 x i32> <i32 7, i32 7, i32 7, i32 7>
// CHECK: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]])
// CHECK: ret <4 x half> [[FMLA]]
float16x4_t test_vfms_laneq_f16(float16x4_t a, float16x4_t b, float16x8_t c) {
  return vfms_laneq_f16(a, b, c, 7);
}

// CHECK-LABEL: test_vfmsq_laneq_f16
// CHECK: [[SUB:%.*]]  = fsub <8 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %b
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> [[SUB]] to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x half> %c to <16 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
// CHECK: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
// CHECK: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]])
// CHECK: ret <8 x half> [[FMLA]]
float16x8_t test_vfmsq_laneq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
  return vfmsq_laneq_f16(a, b, c, 7);
}

// CHECK-LABEL: test_vfms_n_f16
// CHECK: [[SUB:%.*]]  = fsub <4 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %b
// CHECK: [[TMP0:%.*]] = insertelement <4 x half> undef, half %c, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half %c, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %c, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %c, i32 3
// CHECK: [[FMA:%.*]]  = call <4 x half> @llvm.fma.v4f16(<4 x half> [[SUB]], <4 x half> [[TMP3]], <4 x half> %a)
// CHECK: ret <4 x half> [[FMA]]
float16x4_t test_vfms_n_f16(float16x4_t a, float16x4_t b, float16_t c) {
  return vfms_n_f16(a, b, c);
}

// CHECK-LABEL: test_vfmsq_n_f16
// CHECK: [[SUB:%.*]]  = fsub <8 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %b
// CHECK: [[TMP0:%.*]] = insertelement <8 x half> undef, half %c, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half %c, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half %c, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half %c, i32 3
// CHECK: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half %c, i32 4
// CHECK: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half %c, i32 5
// CHECK: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %c, i32 6
// CHECK: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %c, i32 7
// CHECK: [[FMA:%.*]]  = call <8 x half> @llvm.fma.v8f16(<8 x half> [[SUB]], <8 x half> [[TMP7]], <8 x half> %a)
// CHECK: ret <8 x half> [[FMA]]
float16x8_t test_vfmsq_n_f16(float16x8_t a, float16x8_t b, float16_t c) {
  return vfmsq_n_f16(a, b, c);
}

// CHECK-LABEL: test_vfmsh_lane_f16
// CHECK: [[TMP0:%.*]] = fpext half %b to float
// CHECK: [[TMP1:%.*]] = fsub float -0.000000e+00, [[TMP0]]
// CHECK: [[SUB:%.*]]  = fptrunc float [[TMP1]] to half
// CHECK: [[TMP2:%.*]] = bitcast <4 x half> %c to <8 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x half>
// CHECK: [[EXTR:%.*]] = extractelement <4 x half> [[TMP3]], i32 3
// CHECK: [[FMA:%.*]]  = call half @llvm.fma.f16(half [[SUB]], half [[EXTR]], half %a)
// CHECK: ret half [[FMA]]
float16_t test_vfmsh_lane_f16(float16_t a, float16_t b, float16x4_t c) {
  return vfmsh_lane_f16(a, b, c, 3);
}

// CHECK-LABEL: test_vfmsh_laneq_f16
// CHECK: [[TMP0:%.*]] = fpext half %b to float
// CHECK: [[TMP1:%.*]] = fsub float -0.000000e+00, [[TMP0]]
// CHECK: [[SUB:%.*]]  = fptrunc float [[TMP1]] to half
// CHECK: [[TMP2:%.*]] = bitcast <8 x half> %c to <16 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
// CHECK: [[EXTR:%.*]] = extractelement <8 x half> [[TMP3]], i32 7
// CHECK: [[FMA:%.*]]  = call half @llvm.fma.f16(half [[SUB]], half [[EXTR]], half %a)
// CHECK: ret half [[FMA]]
float16_t test_vfmsh_laneq_f16(float16_t a, float16_t b, float16x8_t c) {
  return vfmsh_laneq_f16(a, b, c, 7);
}

// CHECK-LABEL: test_vmul_lane_f16
// CHECK: [[TMP0:%.*]] = shufflevector <4 x half> %b, <4 x half> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
// CHECK: [[MUL:%.*]]  = fmul <4 x half> %a, [[TMP0]]
// CHECK: ret <4 x half> [[MUL]]
float16x4_t test_vmul_lane_f16(float16x4_t a, float16x4_t b) {
  return vmul_lane_f16(a, b, 3);
}
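
// The _lane/_laneq forms splat the selected lane with a shufflevector
// before the multiply; the vector variants below follow the same shape.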

// CHECK-LABEL: test_vmulq_lane_f16
// CHECK: [[TMP0:%.*]] = shufflevector <4 x half> %b, <4 x half> %b, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
// CHECK: [[MUL:%.*]]  = fmul <8 x half> %a, [[TMP0]]
// CHECK: ret <8 x half> [[MUL]]
float16x8_t test_vmulq_lane_f16(float16x8_t a, float16x4_t b) {
  return vmulq_lane_f16(a, b, 3);
}

// CHECK-LABEL: test_vmul_laneq_f16
// CHECK: [[TMP0:%.*]] = shufflevector <8 x half> %b, <8 x half> %b, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
// CHECK: [[MUL:%.*]]  = fmul <4 x half> %a, [[TMP0]]
// CHECK: ret <4 x half> [[MUL]]
float16x4_t test_vmul_laneq_f16(float16x4_t a, float16x8_t b) {
  return vmul_laneq_f16(a, b, 7);
}

// CHECK-LABEL: test_vmulq_laneq_f16
// CHECK: [[TMP0:%.*]] = shufflevector <8 x half> %b, <8 x half> %b, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
// CHECK: [[MUL:%.*]]  = fmul <8 x half> %a, [[TMP0]]
// CHECK: ret <8 x half> [[MUL]]
float16x8_t test_vmulq_laneq_f16(float16x8_t a, float16x8_t b) {
  return vmulq_laneq_f16(a, b, 7);
}

// CHECK-LABEL: test_vmul_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <4 x half> undef, half %b, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half %b, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %b, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %b, i32 3
// CHECK: [[MUL:%.*]]  = fmul <4 x half> %a, [[TMP3]]
// CHECK: ret <4 x half> [[MUL]]
float16x4_t test_vmul_n_f16(float16x4_t a, float16_t b) {
  return vmul_n_f16(a, b);
}

// CHECK-LABEL: test_vmulq_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <8 x half> undef, half %b, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half %b, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half %b, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half %b, i32 3
// CHECK: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half %b, i32 4
// CHECK: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half %b, i32 5
// CHECK: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %b, i32 6
// CHECK: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %b, i32 7
// CHECK: [[MUL:%.*]]  = fmul <8 x half> %a, [[TMP7]]
// CHECK: ret <8 x half> [[MUL]]
float16x8_t test_vmulq_n_f16(float16x8_t a, float16_t b) {
  return vmulq_n_f16(a, b);
}

// FIXME: Fix it when fp16 non-storage-only type becomes available.
// CHECK-LABEL: test_vmulh_lane_f16
// CHECK: [[CONV0:%.*]] = fpext half %a to float
// CHECK: [[CONV1:%.*]] = fpext half %{{.*}} to float
// CHECK: [[MUL:%.*]]   = fmul float [[CONV0]], [[CONV1]]
// CHECK: [[CONV3:%.*]] = fptrunc float [[MUL]] to half
// CHECK: ret half [[CONV3]]
float16_t test_vmulh_lane_f16(float16_t a, float16x4_t b) {
  return vmulh_lane_f16(a, b, 3);
}

// CHECK-LABEL: test_vmulh_laneq_f16
// CHECK: [[CONV0:%.*]] = fpext half %a to float
// CHECK: [[CONV1:%.*]] = fpext half %{{.*}} to float
// CHECK: [[MUL:%.*]]   = fmul float [[CONV0]], [[CONV1]]
// CHECK: [[CONV3:%.*]] = fptrunc float [[MUL]] to half
// CHECK: ret half [[CONV3]]
float16_t test_vmulh_laneq_f16(float16_t a, float16x8_t b) {
  return vmulh_laneq_f16(a, b, 7);
}

// CHECK-LABEL: test_vmulx_lane_f16
// CHECK: [[TMP0:%.*]] = shufflevector <4 x half> %b, <4 x half> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
// CHECK: [[MUL:%.*]] = call <4 x half> @llvm.aarch64.neon.fmulx.v4f16(<4 x half> %a, <4 x half> [[TMP0]])
// CHECK: ret <4 x half> [[MUL]]
float16x4_t test_vmulx_lane_f16(float16x4_t a, float16x4_t b) {
  return vmulx_lane_f16(a, b, 3);
}
1182 
// CHECK-LABEL: test_vmulxq_lane_f16
// CHECK: [[TMP0:%.*]] = shufflevector <4 x half> %b, <4 x half> %b, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
// CHECK: [[MUL:%.*]] = call <8 x half> @llvm.aarch64.neon.fmulx.v8f16(<8 x half> %a, <8 x half> [[TMP0]])
// CHECK: ret <8 x half> [[MUL]]
float16x8_t test_vmulxq_lane_f16(float16x8_t a, float16x4_t b) {
  return vmulxq_lane_f16(a, b, 3);
}

// CHECK-LABEL: test_vmulx_laneq_f16
// CHECK: [[TMP0:%.*]] = shufflevector <8 x half> %b, <8 x half> %b, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
// CHECK: [[MUL:%.*]]  = call <4 x half> @llvm.aarch64.neon.fmulx.v4f16(<4 x half> %a, <4 x half> [[TMP0]])
// CHECK: ret <4 x half> [[MUL]]
float16x4_t test_vmulx_laneq_f16(float16x4_t a, float16x8_t b) {
  return vmulx_laneq_f16(a, b, 7);
}

// CHECK-LABEL: test_vmulxq_laneq_f16
// CHECK: [[TMP0:%.*]] = shufflevector <8 x half> %b, <8 x half> %b, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
// CHECK: [[MUL:%.*]]  = call <8 x half> @llvm.aarch64.neon.fmulx.v8f16(<8 x half> %a, <8 x half> [[TMP0]])
// CHECK: ret <8 x half> [[MUL]]
float16x8_t test_vmulxq_laneq_f16(float16x8_t a, float16x8_t b) {
  return vmulxq_laneq_f16(a, b, 7);
}

// CHECK-LABEL: test_vmulx_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <4 x half> undef, half %b, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half %b, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %b, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %b, i32 3
// CHECK: [[MUL:%.*]]  = call <4 x half> @llvm.aarch64.neon.fmulx.v4f16(<4 x half> %a, <4 x half> [[TMP3]])
// CHECK: ret <4 x half> [[MUL]]
float16x4_t test_vmulx_n_f16(float16x4_t a, float16_t b) {
  return vmulx_n_f16(a, b);
}

// CHECK-LABEL: test_vmulxq_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <8 x half> undef, half %b, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half %b, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half %b, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half %b, i32 3
// CHECK: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half %b, i32 4
// CHECK: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half %b, i32 5
// CHECK: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %b, i32 6
// CHECK: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %b, i32 7
// CHECK: [[MUL:%.*]]  = call <8 x half> @llvm.aarch64.neon.fmulx.v8f16(<8 x half> %a, <8 x half> [[TMP7]])
// CHECK: ret <8 x half> [[MUL]]
float16x8_t test_vmulxq_n_f16(float16x8_t a, float16_t b) {
  return vmulxq_n_f16(a, b);
}

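// The scalar (h-suffixed) fmulx forms extract the requested lane (the
// bitcast pair below is a no-op round trip) and call the scalar f16
// intrinsic.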
// CHECK-LABEL: test_vmulxh_lane_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %b to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[EXTR:%.*]] = extractelement <4 x half> [[TMP1]], i32 3
// CHECK: [[MULX:%.*]] = call half @llvm.aarch64.neon.fmulx.f16(half %a, half [[EXTR]])
// CHECK: ret half [[MULX]]
float16_t test_vmulxh_lane_f16(float16_t a, float16x4_t b) {
  return vmulxh_lane_f16(a, b, 3);
}

// CHECK-LABEL: test_vmulxh_laneq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %b to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[EXTR:%.*]] = extractelement <8 x half> [[TMP1]], i32 7
// CHECK: [[MULX:%.*]] = call half @llvm.aarch64.neon.fmulx.f16(half %a, half [[EXTR]])
// CHECK: ret half [[MULX]]
float16_t test_vmulxh_laneq_f16(float16_t a, float16x8_t b) {
  return vmulxh_laneq_f16(a, b, 7);
}

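// Across-vector reductions: fmaxv/fminv return the largest/smallest lane.
// The *nm variants further below use IEEE 754-2008 maxNum/minNum semantics,
// preferring the numeric operand when the other is a quiet NaN.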
// CHECK-LABEL: test_vmaxv_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[MAX:%.*]]  = call half @llvm.aarch64.neon.fmaxv.f16.v4f16(<4 x half> [[TMP1]])
// CHECK: ret half [[MAX]]
float16_t test_vmaxv_f16(float16x4_t a) {
  return vmaxv_f16(a);
}

// CHECK-LABEL: test_vmaxvq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[MAX:%.*]]  = call half @llvm.aarch64.neon.fmaxv.f16.v8f16(<8 x half> [[TMP1]])
// CHECK: ret half [[MAX]]
float16_t test_vmaxvq_f16(float16x8_t a) {
  return vmaxvq_f16(a);
}

// CHECK-LABEL: test_vminv_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[MIN:%.*]]  = call half @llvm.aarch64.neon.fminv.f16.v4f16(<4 x half> [[TMP1]])
// CHECK: ret half [[MIN]]
float16_t test_vminv_f16(float16x4_t a) {
  return vminv_f16(a);
}

// CHECK-LABEL: test_vminvq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[MIN:%.*]]  = call half @llvm.aarch64.neon.fminv.f16.v8f16(<8 x half> [[TMP1]])
// CHECK: ret half [[MIN]]
float16_t test_vminvq_f16(float16x8_t a) {
  return vminvq_f16(a);
}

// CHECK-LABEL: test_vmaxnmv_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[MAX:%.*]]  = call half @llvm.aarch64.neon.fmaxnmv.f16.v4f16(<4 x half> [[TMP1]])
// CHECK: ret half [[MAX]]
float16_t test_vmaxnmv_f16(float16x4_t a) {
  return vmaxnmv_f16(a);
}

// CHECK-LABEL: test_vmaxnmvq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[MAX:%.*]]  = call half @llvm.aarch64.neon.fmaxnmv.f16.v8f16(<8 x half> [[TMP1]])
// CHECK: ret half [[MAX]]
float16_t test_vmaxnmvq_f16(float16x8_t a) {
  return vmaxnmvq_f16(a);
}

// CHECK-LABEL: test_vminnmv_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[MIN:%.*]]  = call half @llvm.aarch64.neon.fminnmv.f16.v4f16(<4 x half> [[TMP1]])
// CHECK: ret half [[MIN]]
float16_t test_vminnmv_f16(float16x4_t a) {
  return vminnmv_f16(a);
}

// CHECK-LABEL: test_vminnmvq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[MIN:%.*]]  = call half @llvm.aarch64.neon.fminnmv.f16.v8f16(<8 x half> [[TMP1]])
// CHECK: ret half [[MIN]]
float16_t test_vminnmvq_f16(float16x8_t a) {
  return vminnmvq_f16(a);
}

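// vbsl is a bitwise select: each result bit comes from b where the
// corresponding bit of the mask a is 1 and from c where it is 0,
// i.e. (a & b) | (~a & c).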
// CHECK-LABEL: test_vbsl_f16
// CHECK:  [[TMP0:%.*]] = bitcast <4 x half> %b to <8 x i8>
// CHECK:  [[TMP1:%.*]] = bitcast <4 x half> %c to <8 x i8>
// CHECK:  [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK:  [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK:  [[TMP4:%.*]] = and <4 x i16> %a, [[TMP2]]
// CHECK:  [[TMP5:%.*]] = xor <4 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK:  [[TMP6:%.*]] = and <4 x i16> [[TMP5]], [[TMP3]]
// CHECK:  [[TMP7:%.*]] = or <4 x i16> [[TMP4]], [[TMP6]]
// CHECK:  [[TMP8:%.*]] = bitcast <4 x i16> [[TMP7]] to <4 x half>
// CHECK:  ret <4 x half> [[TMP8]]
float16x4_t test_vbsl_f16(uint16x4_t a, float16x4_t b, float16x4_t c) {
  return vbsl_f16(a, b, c);
}

// CHECK-LABEL: test_vbslq_f16
// CHECK:  [[TMP0:%.*]] = bitcast <8 x half> %b to <16 x i8>
// CHECK:  [[TMP1:%.*]] = bitcast <8 x half> %c to <16 x i8>
// CHECK:  [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK:  [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
// CHECK:  [[TMP4:%.*]] = and <8 x i16> %a, [[TMP2]]
// CHECK:  [[TMP5:%.*]] = xor <8 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK:  [[TMP6:%.*]] = and <8 x i16> [[TMP5]], [[TMP3]]
// CHECK:  [[TMP7:%.*]] = or <8 x i16> [[TMP4]], [[TMP6]]
// CHECK:  [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <8 x half>
// CHECK:  ret <8 x half> [[TMP8]]
float16x8_t test_vbslq_f16(uint16x8_t a, float16x8_t b, float16x8_t c) {
  return vbslq_f16(a, b, c);
}

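// The two-vector zip/uzp/trn intrinsics return a float16x4x2_t or
// float16x8x2_t; with only mem2reg run on the -O0 output, the two
// shufflevector halves are stored through the returned struct's alloca.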
// CHECK-LABEL: test_vzip_f16
// CHECK:   [[RETVAL:%.*]]  = alloca %struct.float16x4x2_t, align 8
// CHECK:   [[__RET_I:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK:   [[TMP0:%.*]]  = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
// CHECK:   [[TMP1:%.*]]  = bitcast i8* [[TMP0]] to <4 x half>*
// CHECK:   [[VZIP0_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
// CHECK:   store <4 x half> [[VZIP0_I]], <4 x half>* [[TMP1]]
// CHECK:   [[TMP2:%.*]] = getelementptr inbounds <4 x half>, <4 x half>* [[TMP1]], i32 1
// CHECK:   [[VZIP1_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
// CHECK:   store <4 x half> [[VZIP1_I]], <4 x half>* [[TMP2]]
float16x4x2_t test_vzip_f16(float16x4_t a, float16x4_t b) {
  return vzip_f16(a, b);
}

// CHECK-LABEL: test_vzipq_f16
// CHECK:   [[RETVAL:%.*]]  = alloca %struct.float16x8x2_t, align 16
// CHECK:   [[__RET_I:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK:   [[TMP0:%.*]]  = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
// CHECK:   [[TMP1:%.*]]  = bitcast i8* [[TMP0]] to <8 x half>*
// CHECK:   [[VZIP0_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
// CHECK:   store <8 x half> [[VZIP0_I]], <8 x half>* [[TMP1]]
// CHECK:   [[TMP2:%.*]] = getelementptr inbounds <8 x half>, <8 x half>* [[TMP1]], i32 1
// CHECK:   [[VZIP1_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
// CHECK:   store <8 x half> [[VZIP1_I]], <8 x half>* [[TMP2]]
float16x8x2_t test_vzipq_f16(float16x8_t a, float16x8_t b) {
  return vzipq_f16(a, b);
}

// CHECK-LABEL: test_vuzp_f16
// CHECK:   [[RETVAL:%.*]]  = alloca %struct.float16x4x2_t, align 8
// CHECK:   [[__RET_I:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK:   [[TMP0:%.*]]  = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
// CHECK:   [[TMP1:%.*]]  = bitcast i8* [[TMP0]] to <4 x half>*
// CHECK:   [[VUZP0_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
// CHECK:   store <4 x half> [[VUZP0_I]], <4 x half>* [[TMP1]]
// CHECK:   [[TMP2:%.*]] = getelementptr inbounds <4 x half>, <4 x half>* [[TMP1]], i32 1
// CHECK:   [[VUZP1_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
// CHECK:   store <4 x half> [[VUZP1_I]], <4 x half>* [[TMP2]]
float16x4x2_t test_vuzp_f16(float16x4_t a, float16x4_t b) {
  return vuzp_f16(a, b);
}

// CHECK-LABEL: test_vuzpq_f16
// CHECK:   [[RETVAL:%.*]]  = alloca %struct.float16x8x2_t, align 16
// CHECK:   [[__RET_I:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK:   [[TMP0:%.*]]  = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
// CHECK:   [[TMP1:%.*]]  = bitcast i8* [[TMP0]] to <8 x half>*
// CHECK:   [[VUZP0_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
// CHECK:   store <8 x half> [[VUZP0_I]], <8 x half>* [[TMP1]]
// CHECK:   [[TMP2:%.*]] = getelementptr inbounds <8 x half>, <8 x half>* [[TMP1]], i32 1
// CHECK:   [[VUZP1_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
// CHECK:   store <8 x half> [[VUZP1_I]], <8 x half>* [[TMP2]]
float16x8x2_t test_vuzpq_f16(float16x8_t a, float16x8_t b) {
  return vuzpq_f16(a, b);
}

// CHECK-LABEL: test_vtrn_f16
// CHECK:   [[RETVAL:%.*]]  = alloca %struct.float16x4x2_t, align 8
// CHECK:   [[__RET_I:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK:   [[TMP0:%.*]]  = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
// CHECK:   [[TMP1:%.*]]  = bitcast i8* [[TMP0]] to <4 x half>*
// CHECK:   [[VTRN0_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
// CHECK:   store <4 x half> [[VTRN0_I]], <4 x half>* [[TMP1]]
// CHECK:   [[TMP2:%.*]] = getelementptr inbounds <4 x half>, <4 x half>* [[TMP1]], i32 1
// CHECK:   [[VTRN1_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
// CHECK:   store <4 x half> [[VTRN1_I]], <4 x half>* [[TMP2]]
float16x4x2_t test_vtrn_f16(float16x4_t a, float16x4_t b) {
  return vtrn_f16(a, b);
}

// CHECK-LABEL: test_vtrnq_f16
// CHECK:   [[RETVAL:%.*]]  = alloca %struct.float16x8x2_t, align 16
// CHECK:   [[__RET_I:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK:   [[TMP0:%.*]]  = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
// CHECK:   [[TMP1:%.*]]  = bitcast i8* [[TMP0]] to <8 x half>*
// CHECK:   [[VTRN0_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
// CHECK:   store <8 x half> [[VTRN0_I]], <8 x half>* [[TMP1]]
// CHECK:   [[TMP2:%.*]] = getelementptr inbounds <8 x half>, <8 x half>* [[TMP1]], i32 1
// CHECK:   [[VTRN1_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
// CHECK:   store <8 x half> [[VTRN1_I]], <8 x half>* [[TMP2]]
float16x8x2_t test_vtrnq_f16(float16x8_t a, float16x8_t b) {
  return vtrnq_f16(a, b);
}

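// vmov_n/vdup_n broadcast the scalar through a chain of insertelement
// instructions; nothing canonicalizes the chain into a splat shuffle at
// this optimization level.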
// CHECK-LABEL: test_vmov_n_f16
// CHECK:   [[TMP0:%.*]] = insertelement <4 x half> undef, half %a, i32 0
// CHECK:   [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half %a, i32 1
// CHECK:   [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %a, i32 2
// CHECK:   [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %a, i32 3
// CHECK:   ret <4 x half> [[TMP3]]
float16x4_t test_vmov_n_f16(float16_t a) {
  return vmov_n_f16(a);
}

// CHECK-LABEL: test_vmovq_n_f16
// CHECK:   [[TMP0:%.*]] = insertelement <8 x half> undef, half %a, i32 0
// CHECK:   [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half %a, i32 1
// CHECK:   [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half %a, i32 2
// CHECK:   [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half %a, i32 3
// CHECK:   [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half %a, i32 4
// CHECK:   [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half %a, i32 5
// CHECK:   [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %a, i32 6
// CHECK:   [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %a, i32 7
// CHECK:   ret <8 x half> [[TMP7]]
float16x8_t test_vmovq_n_f16(float16_t a) {
  return vmovq_n_f16(a);
}

// CHECK-LABEL: test_vdup_n_f16
// CHECK:   [[TMP0:%.*]] = insertelement <4 x half> undef, half %a, i32 0
// CHECK:   [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half %a, i32 1
// CHECK:   [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %a, i32 2
// CHECK:   [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %a, i32 3
// CHECK:   ret <4 x half> [[TMP3]]
float16x4_t test_vdup_n_f16(float16_t a) {
  return vdup_n_f16(a);
}

// CHECK-LABEL: test_vdupq_n_f16
// CHECK:   [[TMP0:%.*]] = insertelement <8 x half> undef, half %a, i32 0
// CHECK:   [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half %a, i32 1
// CHECK:   [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half %a, i32 2
// CHECK:   [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half %a, i32 3
// CHECK:   [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half %a, i32 4
// CHECK:   [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half %a, i32 5
// CHECK:   [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %a, i32 6
// CHECK:   [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %a, i32 7
// CHECK:   ret <8 x half> [[TMP7]]
float16x8_t test_vdupq_n_f16(float16_t a) {
  return vdupq_n_f16(a);
}

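// Duplicating an existing lane needs no insertelement chain: it is a single
// shufflevector with a constant splat mask.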
// CHECK-LABEL: test_vdup_lane_f16
// CHECK:   [[SHFL:%.*]] = shufflevector <4 x half> %a, <4 x half> %a, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
// CHECK:   ret <4 x half> [[SHFL]]
float16x4_t test_vdup_lane_f16(float16x4_t a) {
  return vdup_lane_f16(a, 3);
}

// CHECK-LABEL: test_vdupq_lane_f16
// CHECK:   [[SHFL:%.*]] = shufflevector <4 x half> %a, <4 x half> %a, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
// CHECK:   ret <8 x half> [[SHFL]]
float16x8_t test_vdupq_lane_f16(float16x4_t a) {
  return vdupq_lane_f16(a, 3);
}

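// vext concatenates the two inputs and extracts a window starting at the
// immediate lane, so the shuffle indices run consecutively from there.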
// CHECK-LABEL: @test_vext_f16(
// CHECK:   [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK:   [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
// CHECK:   [[VEXT:%.*]] = shufflevector <4 x half> [[TMP2]], <4 x half> [[TMP3]], <4 x i32> <i32 2, i32 3, i32 4, i32 5>
// CHECK:   ret <4 x half> [[VEXT]]
float16x4_t test_vext_f16(float16x4_t a, float16x4_t b) {
  return vext_f16(a, b, 2);
}

// CHECK-LABEL: @test_vextq_f16(
// CHECK:   [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK:   [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
// CHECK:   [[VEXT:%.*]] = shufflevector <8 x half> [[TMP2]], <8 x half> [[TMP3]], <8 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12>
// CHECK:   ret <8 x half> [[VEXT]]
float16x8_t test_vextq_f16(float16x8_t a, float16x8_t b) {
  return vextq_f16(a, b, 5);
}

// CHECK-LABEL: @test_vrev64_f16(
// CHECK:   [[SHFL:%.*]] = shufflevector <4 x half> %a, <4 x half> %a, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// CHECK:   ret <4 x half> [[SHFL]]
float16x4_t test_vrev64_f16(float16x4_t a) {
  return vrev64_f16(a);
}

// CHECK-LABEL: @test_vrev64q_f16(
// CHECK:   [[SHFL:%.*]] = shufflevector <8 x half> %a, <8 x half> %a, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK:   ret <8 x half> [[SHFL]]
float16x8_t test_vrev64q_f16(float16x8_t a) {
  return vrev64q_f16(a);
}

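// The AArch64-specific zip1/zip2/uzp1/uzp2/trn1/trn2 intrinsics return a
// single vector and lower to exactly one shufflevector each.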
// CHECK-LABEL: @test_vzip1_f16(
// CHECK:   [[SHFL:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
// CHECK:   ret <4 x half> [[SHFL]]
float16x4_t test_vzip1_f16(float16x4_t a, float16x4_t b) {
  return vzip1_f16(a, b);
}

// CHECK-LABEL: @test_vzip1q_f16(
// CHECK:   [[SHFL:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
// CHECK:   ret <8 x half> [[SHFL]]
float16x8_t test_vzip1q_f16(float16x8_t a, float16x8_t b) {
  return vzip1q_f16(a, b);
}

// CHECK-LABEL: @test_vzip2_f16(
// CHECK:   [[SHFL:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
// CHECK:   ret <4 x half> [[SHFL]]
float16x4_t test_vzip2_f16(float16x4_t a, float16x4_t b) {
  return vzip2_f16(a, b);
}

// CHECK-LABEL: @test_vzip2q_f16(
// CHECK:   [[SHFL:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
// CHECK:   ret <8 x half> [[SHFL]]
float16x8_t test_vzip2q_f16(float16x8_t a, float16x8_t b) {
  return vzip2q_f16(a, b);
}

// CHECK-LABEL: @test_vuzp1_f16(
// CHECK:   [[SHUFFLE_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
// CHECK:   ret <4 x half> [[SHUFFLE_I]]
float16x4_t test_vuzp1_f16(float16x4_t a, float16x4_t b) {
  return vuzp1_f16(a, b);
}

// CHECK-LABEL: @test_vuzp1q_f16(
// CHECK:   [[SHUFFLE_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
// CHECK:   ret <8 x half> [[SHUFFLE_I]]
float16x8_t test_vuzp1q_f16(float16x8_t a, float16x8_t b) {
  return vuzp1q_f16(a, b);
}

// CHECK-LABEL: @test_vuzp2_f16(
// CHECK:   [[SHUFFLE_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
// CHECK:   ret <4 x half> [[SHUFFLE_I]]
float16x4_t test_vuzp2_f16(float16x4_t a, float16x4_t b) {
  return vuzp2_f16(a, b);
}

// CHECK-LABEL: @test_vuzp2q_f16(
// CHECK:   [[SHUFFLE_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
// CHECK:   ret <8 x half> [[SHUFFLE_I]]
float16x8_t test_vuzp2q_f16(float16x8_t a, float16x8_t b) {
  return vuzp2q_f16(a, b);
}

// CHECK-LABEL: @test_vtrn1_f16(
// CHECK:   [[SHUFFLE_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
// CHECK:   ret <4 x half> [[SHUFFLE_I]]
float16x4_t test_vtrn1_f16(float16x4_t a, float16x4_t b) {
  return vtrn1_f16(a, b);
}

// CHECK-LABEL: @test_vtrn1q_f16(
// CHECK:   [[SHUFFLE_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
// CHECK:   ret <8 x half> [[SHUFFLE_I]]
float16x8_t test_vtrn1q_f16(float16x8_t a, float16x8_t b) {
  return vtrn1q_f16(a, b);
}

// CHECK-LABEL: @test_vtrn2_f16(
// CHECK:   [[SHUFFLE_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
// CHECK:   ret <4 x half> [[SHUFFLE_I]]
float16x4_t test_vtrn2_f16(float16x4_t a, float16x4_t b) {
  return vtrn2_f16(a, b);
}

// CHECK-LABEL: @test_vtrn2q_f16(
// CHECK:   [[SHUFFLE_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
// CHECK:   ret <8 x half> [[SHUFFLE_I]]
float16x8_t test_vtrn2q_f16(float16x8_t a, float16x8_t b) {
  return vtrn2q_f16(a, b);
}