// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -target-feature +fullfp16 -target-feature +v8.2a \
// RUN: -fallow-half-arguments-and-returns -flax-vector-conversions=none -S -disable-O0-optnone -emit-llvm -o - %s \
// RUN: | opt -S -mem2reg \
// RUN: | FileCheck %s

// REQUIRES: aarch64-registered-target

#include <arm_neon.h>

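// Tests that the ARMv8.2-A fullfp16 vector intrinsics from <arm_neon.h>
// lower to the expected LLVM IR: native fp16 instructions, generic LLVM
// intrinsics, or aarch64.neon.* intrinsics, once mem2reg has cleaned up
// the unoptimized output.
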
// CHECK-LABEL: test_vabs_f16
// CHECK: [[ABS:%.*]] = call <4 x half> @llvm.fabs.v4f16(<4 x half> %a)
// CHECK: ret <4 x half> [[ABS]]
float16x4_t test_vabs_f16(float16x4_t a) {
  return vabs_f16(a);
}

// CHECK-LABEL: test_vabsq_f16
// CHECK: [[ABS:%.*]] = call <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
// CHECK: ret <8 x half> [[ABS]]
float16x8_t test_vabsq_f16(float16x8_t a) {
  return vabsq_f16(a);
}

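// Comparisons against zero lower to an ordered fcmp with zeroinitializer
// followed by a sign extension of the <N x i1> mask into the 16-bit lanes
// of the result vector.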
// CHECK-LABEL: test_vceqz_f16
// CHECK: [[TMP1:%.*]] = fcmp oeq <4 x half> %a, zeroinitializer
// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[TMP2]]
uint16x4_t test_vceqz_f16(float16x4_t a) {
  return vceqz_f16(a);
}

// CHECK-LABEL: test_vceqzq_f16
// CHECK: [[TMP1:%.*]] = fcmp oeq <8 x half> %a, zeroinitializer
// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[TMP2]]
uint16x8_t test_vceqzq_f16(float16x8_t a) {
  return vceqzq_f16(a);
}

// CHECK-LABEL: test_vcgez_f16
// CHECK: [[TMP1:%.*]] = fcmp oge <4 x half> %a, zeroinitializer
// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[TMP2]]
uint16x4_t test_vcgez_f16(float16x4_t a) {
  return vcgez_f16(a);
}

// CHECK-LABEL: test_vcgezq_f16
// CHECK: [[TMP1:%.*]] = fcmp oge <8 x half> %a, zeroinitializer
// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[TMP2]]
uint16x8_t test_vcgezq_f16(float16x8_t a) {
  return vcgezq_f16(a);
}

// CHECK-LABEL: test_vcgtz_f16
// CHECK: [[TMP1:%.*]] = fcmp ogt <4 x half> %a, zeroinitializer
// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[TMP2]]
uint16x4_t test_vcgtz_f16(float16x4_t a) {
  return vcgtz_f16(a);
}

// CHECK-LABEL: test_vcgtzq_f16
// CHECK: [[TMP1:%.*]] = fcmp ogt <8 x half> %a, zeroinitializer
// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[TMP2]]
uint16x8_t test_vcgtzq_f16(float16x8_t a) {
  return vcgtzq_f16(a);
}

// CHECK-LABEL: test_vclez_f16
// CHECK: [[TMP1:%.*]] = fcmp ole <4 x half> %a, zeroinitializer
// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[TMP2]]
uint16x4_t test_vclez_f16(float16x4_t a) {
  return vclez_f16(a);
}

// CHECK-LABEL: test_vclezq_f16
// CHECK: [[TMP1:%.*]] = fcmp ole <8 x half> %a, zeroinitializer
// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[TMP2]]
uint16x8_t test_vclezq_f16(float16x8_t a) {
  return vclezq_f16(a);
}

// CHECK-LABEL: test_vcltz_f16
// CHECK: [[TMP1:%.*]] = fcmp olt <4 x half> %a, zeroinitializer
// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[TMP2]]
uint16x4_t test_vcltz_f16(float16x4_t a) {
  return vcltz_f16(a);
}

// CHECK-LABEL: test_vcltzq_f16
// CHECK: [[TMP1:%.*]] = fcmp olt <8 x half> %a, zeroinitializer
// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[TMP2]]
uint16x8_t test_vcltzq_f16(float16x8_t a) {
  return vcltzq_f16(a);
}

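// Integer/half conversions: the default forms lower to plain sitofp/uitofp
// or to the fcvtzs/fcvtzu intrinsics, while the rounding-mode variants
// vcvt{a,m,n,p} map to the matching aarch64.neon.fcvt* intrinsics.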
// CHECK-LABEL: test_vcvt_f16_s16
// CHECK: [[VCVT:%.*]] = sitofp <4 x i16> %a to <4 x half>
// CHECK: ret <4 x half> [[VCVT]]
float16x4_t test_vcvt_f16_s16 (int16x4_t a) {
  return vcvt_f16_s16(a);
}

// CHECK-LABEL: test_vcvtq_f16_s16
// CHECK: [[VCVT:%.*]] = sitofp <8 x i16> %a to <8 x half>
// CHECK: ret <8 x half> [[VCVT]]
float16x8_t test_vcvtq_f16_s16 (int16x8_t a) {
  return vcvtq_f16_s16(a);
}

// CHECK-LABEL: test_vcvt_f16_u16
// CHECK: [[VCVT:%.*]] = uitofp <4 x i16> %a to <4 x half>
// CHECK: ret <4 x half> [[VCVT]]
float16x4_t test_vcvt_f16_u16 (uint16x4_t a) {
  return vcvt_f16_u16(a);
}

// CHECK-LABEL: test_vcvtq_f16_u16
// CHECK: [[VCVT:%.*]] = uitofp <8 x i16> %a to <8 x half>
// CHECK: ret <8 x half> [[VCVT]]
float16x8_t test_vcvtq_f16_u16 (uint16x8_t a) {
  return vcvtq_f16_u16(a);
}

// CHECK-LABEL: test_vcvt_s16_f16
// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtzs.v4i16.v4f16(<4 x half> %a)
// CHECK: ret <4 x i16> [[VCVT]]
int16x4_t test_vcvt_s16_f16 (float16x4_t a) {
  return vcvt_s16_f16(a);
}

// CHECK-LABEL: test_vcvtq_s16_f16
// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtzs.v8i16.v8f16(<8 x half> %a)
// CHECK: ret <8 x i16> [[VCVT]]
int16x8_t test_vcvtq_s16_f16 (float16x8_t a) {
  return vcvtq_s16_f16(a);
}

// CHECK-LABEL: test_vcvt_u16_f16
// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtzu.v4i16.v4f16(<4 x half> %a)
// CHECK: ret <4 x i16> [[VCVT]]
uint16x4_t test_vcvt_u16_f16 (float16x4_t a) {
  return vcvt_u16_f16(a);
}

// CHECK-LABEL: test_vcvtq_u16_f16
// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtzu.v8i16.v8f16(<8 x half> %a)
// CHECK: ret <8 x i16> [[VCVT]]
uint16x8_t test_vcvtq_u16_f16 (float16x8_t a) {
  return vcvtq_u16_f16(a);
}

// CHECK-LABEL: test_vcvta_s16_f16
// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtas.v4i16.v4f16(<4 x half> %a)
// CHECK: ret <4 x i16> [[VCVT]]
int16x4_t test_vcvta_s16_f16 (float16x4_t a) {
  return vcvta_s16_f16(a);
}

// CHECK-LABEL: test_vcvta_u16_f16
// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtau.v4i16.v4f16(<4 x half> %a)
// CHECK: ret <4 x i16> [[VCVT]]
uint16x4_t test_vcvta_u16_f16 (float16x4_t a) {
  return vcvta_u16_f16(a);
}

// CHECK-LABEL: test_vcvtaq_s16_f16
// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtas.v8i16.v8f16(<8 x half> %a)
// CHECK: ret <8 x i16> [[VCVT]]
int16x8_t test_vcvtaq_s16_f16 (float16x8_t a) {
  return vcvtaq_s16_f16(a);
}

// CHECK-LABEL: test_vcvtm_s16_f16
// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtms.v4i16.v4f16(<4 x half> %a)
// CHECK: ret <4 x i16> [[VCVT]]
int16x4_t test_vcvtm_s16_f16 (float16x4_t a) {
  return vcvtm_s16_f16(a);
}

// CHECK-LABEL: test_vcvtmq_s16_f16
// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtms.v8i16.v8f16(<8 x half> %a)
// CHECK: ret <8 x i16> [[VCVT]]
int16x8_t test_vcvtmq_s16_f16 (float16x8_t a) {
  return vcvtmq_s16_f16(a);
}

// CHECK-LABEL: test_vcvtm_u16_f16
// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtmu.v4i16.v4f16(<4 x half> %a)
// CHECK: ret <4 x i16> [[VCVT]]
uint16x4_t test_vcvtm_u16_f16 (float16x4_t a) {
  return vcvtm_u16_f16(a);
}

// CHECK-LABEL: test_vcvtmq_u16_f16
// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtmu.v8i16.v8f16(<8 x half> %a)
// CHECK: ret <8 x i16> [[VCVT]]
uint16x8_t test_vcvtmq_u16_f16 (float16x8_t a) {
  return vcvtmq_u16_f16(a);
}

// CHECK-LABEL: test_vcvtn_s16_f16
// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtns.v4i16.v4f16(<4 x half> %a)
// CHECK: ret <4 x i16> [[VCVT]]
int16x4_t test_vcvtn_s16_f16 (float16x4_t a) {
  return vcvtn_s16_f16(a);
}

// CHECK-LABEL: test_vcvtnq_s16_f16
// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtns.v8i16.v8f16(<8 x half> %a)
// CHECK: ret <8 x i16> [[VCVT]]
int16x8_t test_vcvtnq_s16_f16 (float16x8_t a) {
  return vcvtnq_s16_f16(a);
}

// CHECK-LABEL: test_vcvtn_u16_f16
// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtnu.v4i16.v4f16(<4 x half> %a)
// CHECK: ret <4 x i16> [[VCVT]]
uint16x4_t test_vcvtn_u16_f16 (float16x4_t a) {
  return vcvtn_u16_f16(a);
}

// CHECK-LABEL: test_vcvtnq_u16_f16
// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtnu.v8i16.v8f16(<8 x half> %a)
// CHECK: ret <8 x i16> [[VCVT]]
uint16x8_t test_vcvtnq_u16_f16 (float16x8_t a) {
  return vcvtnq_u16_f16(a);
}

// CHECK-LABEL: test_vcvtp_s16_f16
// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtps.v4i16.v4f16(<4 x half> %a)
// CHECK: ret <4 x i16> [[VCVT]]
int16x4_t test_vcvtp_s16_f16 (float16x4_t a) {
  return vcvtp_s16_f16(a);
}

// CHECK-LABEL: test_vcvtpq_s16_f16
// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtps.v8i16.v8f16(<8 x half> %a)
// CHECK: ret <8 x i16> [[VCVT]]
int16x8_t test_vcvtpq_s16_f16 (float16x8_t a) {
  return vcvtpq_s16_f16(a);
}

// CHECK-LABEL: test_vcvtp_u16_f16
// CHECK: [[VCVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.fcvtpu.v4i16.v4f16(<4 x half> %a)
// CHECK: ret <4 x i16> [[VCVT]]
uint16x4_t test_vcvtp_u16_f16 (float16x4_t a) {
  return vcvtp_u16_f16(a);
}

// CHECK-LABEL: test_vcvtpq_u16_f16
// CHECK: [[VCVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.fcvtpu.v8i16.v8f16(<8 x half> %a)
// CHECK: ret <8 x i16> [[VCVT]]
uint16x8_t test_vcvtpq_u16_f16 (float16x8_t a) {
  return vcvtpq_u16_f16(a);
}

// CHECK-LABEL: test_vneg_f16
// CHECK: [[NEG:%.*]] = fneg <4 x half> %a
// CHECK: ret <4 x half> [[NEG]]
float16x4_t test_vneg_f16(float16x4_t a) {
  return vneg_f16(a);
}

// CHECK-LABEL: test_vnegq_f16
// CHECK: [[NEG:%.*]] = fneg <8 x half> %a
// CHECK: ret <8 x half> [[NEG]]
float16x8_t test_vnegq_f16(float16x8_t a) {
  return vnegq_f16(a);
}

// CHECK-LABEL: test_vrecpe_f16
// CHECK: [[RCP:%.*]] = call <4 x half> @llvm.aarch64.neon.frecpe.v4f16(<4 x half> %a)
// CHECK: ret <4 x half> [[RCP]]
float16x4_t test_vrecpe_f16(float16x4_t a) {
  return vrecpe_f16(a);
}

// CHECK-LABEL: test_vrecpeq_f16
// CHECK: [[RCP:%.*]] = call <8 x half> @llvm.aarch64.neon.frecpe.v8f16(<8 x half> %a)
// CHECK: ret <8 x half> [[RCP]]
float16x8_t test_vrecpeq_f16(float16x8_t a) {
  return vrecpeq_f16(a);
}

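// The vector rounding intrinsics map one-to-one onto the generic LLVM
// rounding intrinsics: trunc, round, nearbyint, floor, roundeven, ceil,
// and rint.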
// CHECK-LABEL: test_vrnd_f16
// CHECK: [[RND:%.*]] = call <4 x half> @llvm.trunc.v4f16(<4 x half> %a)
// CHECK: ret <4 x half> [[RND]]
float16x4_t test_vrnd_f16(float16x4_t a) {
  return vrnd_f16(a);
}

// CHECK-LABEL: test_vrndq_f16
// CHECK: [[RND:%.*]] = call <8 x half> @llvm.trunc.v8f16(<8 x half> %a)
// CHECK: ret <8 x half> [[RND]]
float16x8_t test_vrndq_f16(float16x8_t a) {
  return vrndq_f16(a);
}

// CHECK-LABEL: test_vrnda_f16
// CHECK: [[RND:%.*]] = call <4 x half> @llvm.round.v4f16(<4 x half> %a)
// CHECK: ret <4 x half> [[RND]]
float16x4_t test_vrnda_f16(float16x4_t a) {
  return vrnda_f16(a);
}

// CHECK-LABEL: test_vrndaq_f16
// CHECK: [[RND:%.*]] = call <8 x half> @llvm.round.v8f16(<8 x half> %a)
// CHECK: ret <8 x half> [[RND]]
float16x8_t test_vrndaq_f16(float16x8_t a) {
  return vrndaq_f16(a);
}

// CHECK-LABEL: test_vrndi_f16
// CHECK: [[RND:%.*]] = call <4 x half> @llvm.nearbyint.v4f16(<4 x half> %a)
// CHECK: ret <4 x half> [[RND]]
float16x4_t test_vrndi_f16(float16x4_t a) {
  return vrndi_f16(a);
}

// CHECK-LABEL: test_vrndiq_f16
// CHECK: [[RND:%.*]] = call <8 x half> @llvm.nearbyint.v8f16(<8 x half> %a)
// CHECK: ret <8 x half> [[RND]]
float16x8_t test_vrndiq_f16(float16x8_t a) {
  return vrndiq_f16(a);
}

// CHECK-LABEL: test_vrndm_f16
// CHECK: [[RND:%.*]] = call <4 x half> @llvm.floor.v4f16(<4 x half> %a)
// CHECK: ret <4 x half> [[RND]]
float16x4_t test_vrndm_f16(float16x4_t a) {
  return vrndm_f16(a);
}

// CHECK-LABEL: test_vrndmq_f16
// CHECK: [[RND:%.*]] = call <8 x half> @llvm.floor.v8f16(<8 x half> %a)
// CHECK: ret <8 x half> [[RND]]
float16x8_t test_vrndmq_f16(float16x8_t a) {
  return vrndmq_f16(a);
}

// CHECK-LABEL: test_vrndn_f16
// CHECK: [[RND:%.*]] = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %a)
// CHECK: ret <4 x half> [[RND]]
float16x4_t test_vrndn_f16(float16x4_t a) {
  return vrndn_f16(a);
}

// CHECK-LABEL: test_vrndnq_f16
// CHECK: [[RND:%.*]] = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %a)
// CHECK: ret <8 x half> [[RND]]
float16x8_t test_vrndnq_f16(float16x8_t a) {
  return vrndnq_f16(a);
}

// CHECK-LABEL: test_vrndp_f16
// CHECK: [[RND:%.*]] = call <4 x half> @llvm.ceil.v4f16(<4 x half> %a)
// CHECK: ret <4 x half> [[RND]]
float16x4_t test_vrndp_f16(float16x4_t a) {
  return vrndp_f16(a);
}

// CHECK-LABEL: test_vrndpq_f16
// CHECK: [[RND:%.*]] = call <8 x half> @llvm.ceil.v8f16(<8 x half> %a)
// CHECK: ret <8 x half> [[RND]]
float16x8_t test_vrndpq_f16(float16x8_t a) {
  return vrndpq_f16(a);
}

// CHECK-LABEL: test_vrndx_f16
// CHECK: [[RND:%.*]] = call <4 x half> @llvm.rint.v4f16(<4 x half> %a)
// CHECK: ret <4 x half> [[RND]]
float16x4_t test_vrndx_f16(float16x4_t a) {
  return vrndx_f16(a);
}

// CHECK-LABEL: test_vrndxq_f16
// CHECK: [[RND:%.*]] = call <8 x half> @llvm.rint.v8f16(<8 x half> %a)
// CHECK: ret <8 x half> [[RND]]
float16x8_t test_vrndxq_f16(float16x8_t a) {
  return vrndxq_f16(a);
}

// CHECK-LABEL: test_vrsqrte_f16
// CHECK: [[RND:%.*]] = call <4 x half> @llvm.aarch64.neon.frsqrte.v4f16(<4 x half> %a)
// CHECK: ret <4 x half> [[RND]]
float16x4_t test_vrsqrte_f16(float16x4_t a) {
  return vrsqrte_f16(a);
}

// CHECK-LABEL: test_vrsqrteq_f16
// CHECK: [[RND:%.*]] = call <8 x half> @llvm.aarch64.neon.frsqrte.v8f16(<8 x half> %a)
// CHECK: ret <8 x half> [[RND]]
float16x8_t test_vrsqrteq_f16(float16x8_t a) {
  return vrsqrteq_f16(a);
}

// CHECK-LABEL: test_vsqrt_f16
// CHECK: [[SQR:%.*]] = call <4 x half> @llvm.sqrt.v4f16(<4 x half> %a)
// CHECK: ret <4 x half> [[SQR]]
float16x4_t test_vsqrt_f16(float16x4_t a) {
  return vsqrt_f16(a);
}

// CHECK-LABEL: test_vsqrtq_f16
// CHECK: [[SQR:%.*]] = call <8 x half> @llvm.sqrt.v8f16(<8 x half> %a)
// CHECK: ret <8 x half> [[SQR]]
float16x8_t test_vsqrtq_f16(float16x8_t a) {
  return vsqrtq_f16(a);
}

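// Binary arithmetic on half vectors lowers to native IR instructions
// (fadd, fsub, fmul, fdiv) or to the corresponding aarch64.neon.*
// intrinsics where no generic instruction exists.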
// CHECK-LABEL: test_vadd_f16
// CHECK: [[ADD:%.*]] = fadd <4 x half> %a, %b
// CHECK: ret <4 x half> [[ADD]]
float16x4_t test_vadd_f16(float16x4_t a, float16x4_t b) {
  return vadd_f16(a, b);
}

// CHECK-LABEL: test_vaddq_f16
// CHECK: [[ADD:%.*]] = fadd <8 x half> %a, %b
// CHECK: ret <8 x half> [[ADD]]
float16x8_t test_vaddq_f16(float16x8_t a, float16x8_t b) {
  return vaddq_f16(a, b);
}

// CHECK-LABEL: test_vabd_f16
// CHECK: [[ABD:%.*]] = call <4 x half> @llvm.aarch64.neon.fabd.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x half> [[ABD]]
float16x4_t test_vabd_f16(float16x4_t a, float16x4_t b) {
  return vabd_f16(a, b);
}

// CHECK-LABEL: test_vabdq_f16
// CHECK: [[ABD:%.*]] = call <8 x half> @llvm.aarch64.neon.fabd.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x half> [[ABD]]
float16x8_t test_vabdq_f16(float16x8_t a, float16x8_t b) {
  return vabdq_f16(a, b);
}

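// Absolute comparisons use the facge/facgt intrinsics; the "absolute
// less-or-equal/less-than" forms are expressed by swapping the operands
// of the corresponding greater-than intrinsic.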
// CHECK-LABEL: test_vcage_f16
// CHECK: [[ABS:%.*]] = call <4 x i16> @llvm.aarch64.neon.facge.v4i16.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x i16> [[ABS]]
uint16x4_t test_vcage_f16(float16x4_t a, float16x4_t b) {
  return vcage_f16(a, b);
}

// CHECK-LABEL: test_vcageq_f16
// CHECK: [[ABS:%.*]] = call <8 x i16> @llvm.aarch64.neon.facge.v8i16.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x i16> [[ABS]]
uint16x8_t test_vcageq_f16(float16x8_t a, float16x8_t b) {
  return vcageq_f16(a, b);
}

// CHECK-LABEL: test_vcagt_f16
// CHECK: [[ABS:%.*]] = call <4 x i16> @llvm.aarch64.neon.facgt.v4i16.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x i16> [[ABS]]
uint16x4_t test_vcagt_f16(float16x4_t a, float16x4_t b) {
  return vcagt_f16(a, b);
}

// CHECK-LABEL: test_vcagtq_f16
// CHECK: [[ABS:%.*]] = call <8 x i16> @llvm.aarch64.neon.facgt.v8i16.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x i16> [[ABS]]
uint16x8_t test_vcagtq_f16(float16x8_t a, float16x8_t b) {
  return vcagtq_f16(a, b);
}

// CHECK-LABEL: test_vcale_f16
// CHECK: [[ABS:%.*]] = call <4 x i16> @llvm.aarch64.neon.facge.v4i16.v4f16(<4 x half> %b, <4 x half> %a)
// CHECK: ret <4 x i16> [[ABS]]
uint16x4_t test_vcale_f16(float16x4_t a, float16x4_t b) {
  return vcale_f16(a, b);
}

// CHECK-LABEL: test_vcaleq_f16
// CHECK: [[ABS:%.*]] = call <8 x i16> @llvm.aarch64.neon.facge.v8i16.v8f16(<8 x half> %b, <8 x half> %a)
// CHECK: ret <8 x i16> [[ABS]]
uint16x8_t test_vcaleq_f16(float16x8_t a, float16x8_t b) {
  return vcaleq_f16(a, b);
}

// CHECK-LABEL: test_vcalt_f16
// CHECK: [[ABS:%.*]] = call <4 x i16> @llvm.aarch64.neon.facgt.v4i16.v4f16(<4 x half> %b, <4 x half> %a)
// CHECK: ret <4 x i16> [[ABS]]
uint16x4_t test_vcalt_f16(float16x4_t a, float16x4_t b) {
  return vcalt_f16(a, b);
}

// CHECK-LABEL: test_vcaltq_f16
// CHECK: [[ABS:%.*]] = call <8 x i16> @llvm.aarch64.neon.facgt.v8i16.v8f16(<8 x half> %b, <8 x half> %a)
// CHECK: ret <8 x i16> [[ABS]]
uint16x8_t test_vcaltq_f16(float16x8_t a, float16x8_t b) {
  return vcaltq_f16(a, b);
}

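// Element-wise comparisons follow the same ordered-fcmp-plus-sext pattern
// as the compare-against-zero intrinsics above.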
// CHECK-LABEL: test_vceq_f16
// CHECK: [[TMP1:%.*]] = fcmp oeq <4 x half> %a, %b
// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[TMP2]]
uint16x4_t test_vceq_f16(float16x4_t a, float16x4_t b) {
  return vceq_f16(a, b);
}

// CHECK-LABEL: test_vceqq_f16
// CHECK: [[TMP1:%.*]] = fcmp oeq <8 x half> %a, %b
// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[TMP2]]
uint16x8_t test_vceqq_f16(float16x8_t a, float16x8_t b) {
  return vceqq_f16(a, b);
}

// CHECK-LABEL: test_vcge_f16
// CHECK: [[TMP1:%.*]] = fcmp oge <4 x half> %a, %b
// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[TMP2]]
uint16x4_t test_vcge_f16(float16x4_t a, float16x4_t b) {
  return vcge_f16(a, b);
}

// CHECK-LABEL: test_vcgeq_f16
// CHECK: [[TMP1:%.*]] = fcmp oge <8 x half> %a, %b
// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[TMP2]]
uint16x8_t test_vcgeq_f16(float16x8_t a, float16x8_t b) {
  return vcgeq_f16(a, b);
}

// CHECK-LABEL: test_vcgt_f16
// CHECK: [[TMP1:%.*]] = fcmp ogt <4 x half> %a, %b
// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[TMP2]]
uint16x4_t test_vcgt_f16(float16x4_t a, float16x4_t b) {
  return vcgt_f16(a, b);
}

// CHECK-LABEL: test_vcgtq_f16
// CHECK: [[TMP1:%.*]] = fcmp ogt <8 x half> %a, %b
// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[TMP2]]
uint16x8_t test_vcgtq_f16(float16x8_t a, float16x8_t b) {
  return vcgtq_f16(a, b);
}

// CHECK-LABEL: test_vcle_f16
// CHECK: [[TMP1:%.*]] = fcmp ole <4 x half> %a, %b
// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[TMP2]]
uint16x4_t test_vcle_f16(float16x4_t a, float16x4_t b) {
  return vcle_f16(a, b);
}

// CHECK-LABEL: test_vcleq_f16
// CHECK: [[TMP1:%.*]] = fcmp ole <8 x half> %a, %b
// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[TMP2]]
uint16x8_t test_vcleq_f16(float16x8_t a, float16x8_t b) {
  return vcleq_f16(a, b);
}

// CHECK-LABEL: test_vclt_f16
// CHECK: [[TMP1:%.*]] = fcmp olt <4 x half> %a, %b
// CHECK: [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i16>
// CHECK: ret <4 x i16> [[TMP2]]
uint16x4_t test_vclt_f16(float16x4_t a, float16x4_t b) {
  return vclt_f16(a, b);
}

// CHECK-LABEL: test_vcltq_f16
// CHECK: [[TMP1:%.*]] = fcmp olt <8 x half> %a, %b
// CHECK: [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
// CHECK: ret <8 x i16> [[TMP2]]
uint16x8_t test_vcltq_f16(float16x8_t a, float16x8_t b) {
  return vcltq_f16(a, b);
}

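// Fixed-point conversions pass the number of fractional bits (2 in these
// tests) as an immediate operand to the vcvtfx*2fp/vcvtfp2fx* intrinsics.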
// CHECK-LABEL: test_vcvt_n_f16_s16
// CHECK: [[CVT:%.*]] = call <4 x half> @llvm.aarch64.neon.vcvtfxs2fp.v4f16.v4i16(<4 x i16> %vcvt_n, i32 2)
// CHECK: ret <4 x half> [[CVT]]
float16x4_t test_vcvt_n_f16_s16(int16x4_t a) {
  return vcvt_n_f16_s16(a, 2);
}

// CHECK-LABEL: test_vcvtq_n_f16_s16
// CHECK: [[CVT:%.*]] = call <8 x half> @llvm.aarch64.neon.vcvtfxs2fp.v8f16.v8i16(<8 x i16> %vcvt_n, i32 2)
// CHECK: ret <8 x half> [[CVT]]
float16x8_t test_vcvtq_n_f16_s16(int16x8_t a) {
  return vcvtq_n_f16_s16(a, 2);
}

// CHECK-LABEL: test_vcvt_n_f16_u16
// CHECK: [[CVT:%.*]] = call <4 x half> @llvm.aarch64.neon.vcvtfxu2fp.v4f16.v4i16(<4 x i16> %vcvt_n, i32 2)
// CHECK: ret <4 x half> [[CVT]]
float16x4_t test_vcvt_n_f16_u16(uint16x4_t a) {
  return vcvt_n_f16_u16(a, 2);
}

// CHECK-LABEL: test_vcvtq_n_f16_u16
// CHECK: [[CVT:%.*]] = call <8 x half> @llvm.aarch64.neon.vcvtfxu2fp.v8f16.v8i16(<8 x i16> %vcvt_n, i32 2)
// CHECK: ret <8 x half> [[CVT]]
float16x8_t test_vcvtq_n_f16_u16(uint16x8_t a) {
  return vcvtq_n_f16_u16(a, 2);
}

// CHECK-LABEL: test_vcvt_n_s16_f16
// CHECK: [[CVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.vcvtfp2fxs.v4i16.v4f16(<4 x half> %vcvt_n, i32 2)
// CHECK: ret <4 x i16> [[CVT]]
int16x4_t test_vcvt_n_s16_f16(float16x4_t a) {
  return vcvt_n_s16_f16(a, 2);
}

// CHECK-LABEL: test_vcvtq_n_s16_f16
// CHECK: [[CVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.vcvtfp2fxs.v8i16.v8f16(<8 x half> %vcvt_n, i32 2)
// CHECK: ret <8 x i16> [[CVT]]
int16x8_t test_vcvtq_n_s16_f16(float16x8_t a) {
  return vcvtq_n_s16_f16(a, 2);
}

// CHECK-LABEL: test_vcvt_n_u16_f16
// CHECK: [[CVT:%.*]] = call <4 x i16> @llvm.aarch64.neon.vcvtfp2fxu.v4i16.v4f16(<4 x half> %vcvt_n, i32 2)
// CHECK: ret <4 x i16> [[CVT]]
uint16x4_t test_vcvt_n_u16_f16(float16x4_t a) {
  return vcvt_n_u16_f16(a, 2);
}

// CHECK-LABEL: test_vcvtq_n_u16_f16
// CHECK: [[CVT:%.*]] = call <8 x i16> @llvm.aarch64.neon.vcvtfp2fxu.v8i16.v8f16(<8 x half> %vcvt_n, i32 2)
// CHECK: ret <8 x i16> [[CVT]]
uint16x8_t test_vcvtq_n_u16_f16(float16x8_t a) {
  return vcvtq_n_u16_f16(a, 2);
}

// CHECK-LABEL: test_vdiv_f16
// CHECK: [[DIV:%.*]] = fdiv <4 x half> %a, %b
// CHECK: ret <4 x half> [[DIV]]
float16x4_t test_vdiv_f16(float16x4_t a, float16x4_t b) {
  return vdiv_f16(a, b);
}

// CHECK-LABEL: test_vdivq_f16
// CHECK: [[DIV:%.*]] = fdiv <8 x half> %a, %b
// CHECK: ret <8 x half> [[DIV]]
float16x8_t test_vdivq_f16(float16x8_t a, float16x8_t b) {
  return vdivq_f16(a, b);
}

// CHECK-LABEL: test_vmax_f16
// CHECK: [[MAX:%.*]] = call <4 x half> @llvm.aarch64.neon.fmax.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x half> [[MAX]]
float16x4_t test_vmax_f16(float16x4_t a, float16x4_t b) {
  return vmax_f16(a, b);
}

// CHECK-LABEL: test_vmaxq_f16
// CHECK: [[MAX:%.*]] = call <8 x half> @llvm.aarch64.neon.fmax.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x half> [[MAX]]
float16x8_t test_vmaxq_f16(float16x8_t a, float16x8_t b) {
  return vmaxq_f16(a, b);
}

// CHECK-LABEL: test_vmaxnm_f16
// CHECK: [[MAX:%.*]] = call <4 x half> @llvm.aarch64.neon.fmaxnm.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x half> [[MAX]]
float16x4_t test_vmaxnm_f16(float16x4_t a, float16x4_t b) {
  return vmaxnm_f16(a, b);
}

// CHECK-LABEL: test_vmaxnmq_f16
// CHECK: [[MAX:%.*]] = call <8 x half> @llvm.aarch64.neon.fmaxnm.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x half> [[MAX]]
float16x8_t test_vmaxnmq_f16(float16x8_t a, float16x8_t b) {
  return vmaxnmq_f16(a, b);
}

// CHECK-LABEL: test_vmin_f16
// CHECK: [[MIN:%.*]] = call <4 x half> @llvm.aarch64.neon.fmin.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x half> [[MIN]]
float16x4_t test_vmin_f16(float16x4_t a, float16x4_t b) {
  return vmin_f16(a, b);
}

// CHECK-LABEL: test_vminq_f16
// CHECK: [[MIN:%.*]] = call <8 x half> @llvm.aarch64.neon.fmin.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x half> [[MIN]]
float16x8_t test_vminq_f16(float16x8_t a, float16x8_t b) {
  return vminq_f16(a, b);
}

// CHECK-LABEL: test_vminnm_f16
// CHECK: [[MIN:%.*]] = call <4 x half> @llvm.aarch64.neon.fminnm.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x half> [[MIN]]
float16x4_t test_vminnm_f16(float16x4_t a, float16x4_t b) {
  return vminnm_f16(a, b);
}

// CHECK-LABEL: test_vminnmq_f16
// CHECK: [[MIN:%.*]] = call <8 x half> @llvm.aarch64.neon.fminnm.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x half> [[MIN]]
float16x8_t test_vminnmq_f16(float16x8_t a, float16x8_t b) {
  return vminnmq_f16(a, b);
}

// CHECK-LABEL: test_vmul_f16
// CHECK: [[MUL:%.*]] = fmul <4 x half> %a, %b
// CHECK: ret <4 x half> [[MUL]]
float16x4_t test_vmul_f16(float16x4_t a, float16x4_t b) {
  return vmul_f16(a, b);
}

// CHECK-LABEL: test_vmulq_f16
// CHECK: [[MUL:%.*]] = fmul <8 x half> %a, %b
// CHECK: ret <8 x half> [[MUL]]
float16x8_t test_vmulq_f16(float16x8_t a, float16x8_t b) {
  return vmulq_f16(a, b);
}

// CHECK-LABEL: test_vmulx_f16
// CHECK: [[MUL:%.*]] = call <4 x half> @llvm.aarch64.neon.fmulx.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x half> [[MUL]]
float16x4_t test_vmulx_f16(float16x4_t a, float16x4_t b) {
  return vmulx_f16(a, b);
}

// CHECK-LABEL: test_vmulxq_f16
// CHECK: [[MUL:%.*]] = call <8 x half> @llvm.aarch64.neon.fmulx.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x half> [[MUL]]
float16x8_t test_vmulxq_f16(float16x8_t a, float16x8_t b) {
  return vmulxq_f16(a, b);
}

// CHECK-LABEL: test_vpadd_f16
// CHECK: [[ADD:%.*]] = call <4 x half> @llvm.aarch64.neon.faddp.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x half> [[ADD]]
float16x4_t test_vpadd_f16(float16x4_t a, float16x4_t b) {
  return vpadd_f16(a, b);
}

// CHECK-LABEL: test_vpaddq_f16
// CHECK: [[ADD:%.*]] = call <8 x half> @llvm.aarch64.neon.faddp.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x half> [[ADD]]
float16x8_t test_vpaddq_f16(float16x8_t a, float16x8_t b) {
  return vpaddq_f16(a, b);
}

// CHECK-LABEL: test_vpmax_f16
// CHECK: [[MAX:%.*]] = call <4 x half> @llvm.aarch64.neon.fmaxp.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x half> [[MAX]]
float16x4_t test_vpmax_f16(float16x4_t a, float16x4_t b) {
  return vpmax_f16(a, b);
}

// CHECK-LABEL: test_vpmaxq_f16
// CHECK: [[MAX:%.*]] = call <8 x half> @llvm.aarch64.neon.fmaxp.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x half> [[MAX]]
float16x8_t test_vpmaxq_f16(float16x8_t a, float16x8_t b) {
  return vpmaxq_f16(a, b);
}

// CHECK-LABEL: test_vpmaxnm_f16
// CHECK: [[MAX:%.*]] = call <4 x half> @llvm.aarch64.neon.fmaxnmp.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x half> [[MAX]]
float16x4_t test_vpmaxnm_f16(float16x4_t a, float16x4_t b) {
  return vpmaxnm_f16(a, b);
}

// CHECK-LABEL: test_vpmaxnmq_f16
// CHECK: [[MAX:%.*]] = call <8 x half> @llvm.aarch64.neon.fmaxnmp.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x half> [[MAX]]
float16x8_t test_vpmaxnmq_f16(float16x8_t a, float16x8_t b) {
  return vpmaxnmq_f16(a, b);
}

// CHECK-LABEL: test_vpmin_f16
// CHECK: [[MIN:%.*]] = call <4 x half> @llvm.aarch64.neon.fminp.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x half> [[MIN]]
float16x4_t test_vpmin_f16(float16x4_t a, float16x4_t b) {
  return vpmin_f16(a, b);
}

// CHECK-LABEL: test_vpminq_f16
// CHECK: [[MIN:%.*]] = call <8 x half> @llvm.aarch64.neon.fminp.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x half> [[MIN]]
float16x8_t test_vpminq_f16(float16x8_t a, float16x8_t b) {
  return vpminq_f16(a, b);
}

// CHECK-LABEL: test_vpminnm_f16
// CHECK: [[MIN:%.*]] = call <4 x half> @llvm.aarch64.neon.fminnmp.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x half> [[MIN]]
float16x4_t test_vpminnm_f16(float16x4_t a, float16x4_t b) {
  return vpminnm_f16(a, b);
}

// CHECK-LABEL: test_vpminnmq_f16
// CHECK: [[MIN:%.*]] = call <8 x half> @llvm.aarch64.neon.fminnmp.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x half> [[MIN]]
float16x8_t test_vpminnmq_f16(float16x8_t a, float16x8_t b) {
  return vpminnmq_f16(a, b);
}

// CHECK-LABEL: test_vrecps_f16
// CHECK: [[MIN:%.*]] = call <4 x half> @llvm.aarch64.neon.frecps.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x half> [[MIN]]
float16x4_t test_vrecps_f16(float16x4_t a, float16x4_t b) {
  return vrecps_f16(a, b);
}

// CHECK-LABEL: test_vrecpsq_f16
// CHECK: [[MIN:%.*]] = call <8 x half> @llvm.aarch64.neon.frecps.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x half> [[MIN]]
float16x8_t test_vrecpsq_f16(float16x8_t a, float16x8_t b) {
  return vrecpsq_f16(a, b);
}

// CHECK-LABEL: test_vrsqrts_f16
// CHECK: [[MIN:%.*]] = call <4 x half> @llvm.aarch64.neon.frsqrts.v4f16(<4 x half> %a, <4 x half> %b)
// CHECK: ret <4 x half> [[MIN]]
float16x4_t test_vrsqrts_f16(float16x4_t a, float16x4_t b) {
  return vrsqrts_f16(a, b);
}

// CHECK-LABEL: test_vrsqrtsq_f16
// CHECK: [[MIN:%.*]] = call <8 x half> @llvm.aarch64.neon.frsqrts.v8f16(<8 x half> %a, <8 x half> %b)
// CHECK: ret <8 x half> [[MIN]]
float16x8_t test_vrsqrtsq_f16(float16x8_t a, float16x8_t b) {
  return vrsqrtsq_f16(a, b);
}

// CHECK-LABEL: test_vsub_f16
// CHECK: [[ADD:%.*]] = fsub <4 x half> %a, %b
// CHECK: ret <4 x half> [[ADD]]
float16x4_t test_vsub_f16(float16x4_t a, float16x4_t b) {
  return vsub_f16(a, b);
}

// CHECK-LABEL: test_vsubq_f16
// CHECK: [[ADD:%.*]] = fsub <8 x half> %a, %b
// CHECK: ret <8 x half> [[ADD]]
float16x8_t test_vsubq_f16(float16x8_t a, float16x8_t b) {
  return vsubq_f16(a, b);
}

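// Fused multiply-accumulate lowers to llvm.fma with the accumulator as the
// last operand; the vfms forms negate the first multiplicand with fneg and
// reuse the same intrinsic.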
// CHECK-LABEL: test_vfma_f16
// CHECK: [[ADD:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %a)
// CHECK: ret <4 x half> [[ADD]]
float16x4_t test_vfma_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
  return vfma_f16(a, b, c);
}

// CHECK-LABEL: test_vfmaq_f16
// CHECK: [[ADD:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %a)
// CHECK: ret <8 x half> [[ADD]]
float16x8_t test_vfmaq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
  return vfmaq_f16(a, b, c);
}

// CHECK-LABEL: test_vfms_f16
// CHECK: [[SUB:%.*]] = fneg <4 x half> %b
// CHECK: [[ADD:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[SUB]], <4 x half> %c, <4 x half> %a)
// CHECK: ret <4 x half> [[ADD]]
float16x4_t test_vfms_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
  return vfms_f16(a, b, c);
}

// CHECK-LABEL: test_vfmsq_f16
// CHECK: [[SUB:%.*]] = fneg <8 x half> %b
// CHECK: [[ADD:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[SUB]], <8 x half> %c, <8 x half> %a)
// CHECK: ret <8 x half> [[ADD]]
float16x8_t test_vfmsq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
  return vfmsq_f16(a, b, c);
}

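// The lane variants splat the selected lane with a shufflevector before
// the fma; the bitcasts through <8 x i8>/<16 x i8> come from the generic
// NEON lane-intrinsic lowering.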
// CHECK-LABEL: test_vfma_lane_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x half> %c to <8 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x half>
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <4 x i32> <i32 3, i32 3, i32 3, i32 3>
// CHECK: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
// CHECK: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]])
// CHECK: ret <4 x half> [[FMLA]]
float16x4_t test_vfma_lane_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
  return vfma_lane_f16(a, b, c, 3);
}

// CHECK-LABEL: test_vfmaq_lane_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x half> %c to <8 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x half>
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
// CHECK: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
// CHECK: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]])
// CHECK: ret <8 x half> [[FMLA]]
float16x8_t test_vfmaq_lane_f16(float16x8_t a, float16x8_t b, float16x4_t c) {
  return vfmaq_lane_f16(a, b, c, 3);
}

// CHECK-LABEL: test_vfma_laneq_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x half> %c to <16 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
// CHECK: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <4 x i32> <i32 7, i32 7, i32 7, i32 7>
// CHECK: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]])
// CHECK: ret <4 x half> [[FMLA]]
float16x4_t test_vfma_laneq_f16(float16x4_t a, float16x4_t b, float16x8_t c) {
  return vfma_laneq_f16(a, b, c, 7);
}

// CHECK-LABEL: test_vfmaq_laneq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x half> %c to <16 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
// CHECK: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
// CHECK: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]])
// CHECK: ret <8 x half> [[FMLA]]
float16x8_t test_vfmaq_laneq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
  return vfmaq_laneq_f16(a, b, c, 7);
}

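// The _n forms materialize the scalar operand as a vector with a chain of
// insertelement instructions instead of a lane shuffle.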
// CHECK-LABEL: test_vfma_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <4 x half> undef, half %c, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half %c, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %c, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %c, i32 3
// CHECK: [[FMA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> %b, <4 x half> [[TMP3]], <4 x half> %a)
// CHECK: ret <4 x half> [[FMA]]
float16x4_t test_vfma_n_f16(float16x4_t a, float16x4_t b, float16_t c) {
  return vfma_n_f16(a, b, c);
}

// CHECK-LABEL: test_vfmaq_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <8 x half> undef, half %c, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half %c, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half %c, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half %c, i32 3
// CHECK: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half %c, i32 4
// CHECK: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half %c, i32 5
// CHECK: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %c, i32 6
// CHECK: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %c, i32 7
// CHECK: [[FMA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> %b, <8 x half> [[TMP7]], <8 x half> %a)
// CHECK: ret <8 x half> [[FMA]]
float16x8_t test_vfmaq_n_f16(float16x8_t a, float16x8_t b, float16_t c) {
  return vfmaq_n_f16(a, b, c);
}

// CHECK-LABEL: test_vfmah_lane_f16
// CHECK: [[EXTR:%.*]] = extractelement <4 x half> %c, i32 3
// CHECK: [[FMA:%.*]] = call half @llvm.fma.f16(half %b, half [[EXTR]], half %a)
// CHECK: ret half [[FMA]]
float16_t test_vfmah_lane_f16(float16_t a, float16_t b, float16x4_t c) {
  return vfmah_lane_f16(a, b, c, 3);
}

// CHECK-LABEL: test_vfmah_laneq_f16
// CHECK: [[EXTR:%.*]] = extractelement <8 x half> %c, i32 7
// CHECK: [[FMA:%.*]] = call half @llvm.fma.f16(half %b, half [[EXTR]], half %a)
// CHECK: ret half [[FMA]]
float16_t test_vfmah_laneq_f16(float16_t a, float16_t b, float16x8_t c) {
  return vfmah_laneq_f16(a, b, c, 7);
}

// CHECK-LABEL: test_vfms_lane_f16
// CHECK: [[SUB:%.*]] = fneg <4 x half> %b
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> [[SUB]] to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x half> %c to <8 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x half>
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <4 x i32> <i32 3, i32 3, i32 3, i32 3>
// CHECK: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
// CHECK: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[FMA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]])
// CHECK: ret <4 x half> [[FMA]]
float16x4_t test_vfms_lane_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
  return vfms_lane_f16(a, b, c, 3);
}

// CHECK-LABEL: test_vfmsq_lane_f16
// CHECK: [[SUB:%.*]] = fneg <8 x half> %b
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> [[SUB]] to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <4 x half> %c to <8 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x half>
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP3]], <4 x half> [[TMP3]], <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
// CHECK: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
// CHECK: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]])
// CHECK: ret <8 x half> [[FMLA]]
float16x8_t test_vfmsq_lane_f16(float16x8_t a, float16x8_t b, float16x4_t c) {
  return vfmsq_lane_f16(a, b, c, 3);
}

// CHECK-LABEL: test_vfms_laneq_f16
// CHECK: [[SUB:%.*]] = fneg <4 x half> %b
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> [[SUB]] to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x half> %c to <16 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
// CHECK: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <4 x i32> <i32 7, i32 7, i32 7, i32 7>
// CHECK: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]])
// CHECK: ret <4 x half> [[FMLA]]
float16x4_t test_vfms_laneq_f16(float16x4_t a, float16x4_t b, float16x8_t c) {
  return vfms_laneq_f16(a, b, c, 7);
}

// CHECK-LABEL: test_vfmsq_laneq_f16
// CHECK: [[SUB:%.*]] = fneg <8 x half> %b
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> [[SUB]] to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x half> %c to <16 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
// CHECK: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
// CHECK: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]])
// CHECK: ret <8 x half> [[FMLA]]
float16x8_t test_vfmsq_laneq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
  return vfmsq_laneq_f16(a, b, c, 7);
}

// CHECK-LABEL: test_vfms_n_f16
// CHECK: [[SUB:%.*]] = fneg <4 x half> %b
// CHECK: [[TMP0:%.*]] = insertelement <4 x half> undef, half %c, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half %c, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %c, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %c, i32 3
// CHECK: [[FMA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[SUB]], <4 x half> [[TMP3]], <4 x half> %a)
// CHECK: ret <4 x half> [[FMA]]
float16x4_t test_vfms_n_f16(float16x4_t a, float16x4_t b, float16_t c) {
  return vfms_n_f16(a, b, c);
}

// CHECK-LABEL: test_vfmsq_n_f16
// CHECK: [[SUB:%.*]] = fneg <8 x half> %b
// CHECK: [[TMP0:%.*]] = insertelement <8 x half> undef, half %c, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half %c, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half %c, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half %c, i32 3
// CHECK: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half %c, i32 4
// CHECK: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half %c, i32 5
// CHECK: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %c, i32 6
// CHECK: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %c, i32 7
// CHECK: [[FMA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[SUB]], <8 x half> [[TMP7]], <8 x half> %a)
// CHECK: ret <8 x half> [[FMA]]
float16x8_t test_vfmsq_n_f16(float16x8_t a, float16x8_t b, float16_t c) {
  return vfmsq_n_f16(a, b, c);
}

// CHECK-LABEL: test_vfmsh_lane_f16
// CHECK: [[TMP0:%.*]] = fpext half %b to float
// CHECK: [[TMP1:%.*]] = fneg float [[TMP0]]
// CHECK: [[SUB:%.*]] = fptrunc float [[TMP1]] to half
// CHECK: [[EXTR:%.*]] = extractelement <4 x half> %c, i32 3
// CHECK: [[FMA:%.*]] = call half @llvm.fma.f16(half [[SUB]], half [[EXTR]], half %a)
// CHECK: ret half [[FMA]]
float16_t test_vfmsh_lane_f16(float16_t a, float16_t b, float16x4_t c) {
  return vfmsh_lane_f16(a, b, c, 3);
}

// CHECK-LABEL: test_vfmsh_laneq_f16
// CHECK: [[TMP0:%.*]] = fpext half %b to float
// CHECK: [[TMP1:%.*]] = fneg float [[TMP0]]
// CHECK: [[SUB:%.*]] = fptrunc float [[TMP1]] to half
// CHECK: [[EXTR:%.*]] = extractelement <8 x half> %c, i32 7
// CHECK: [[FMA:%.*]] = call half @llvm.fma.f16(half [[SUB]], half [[EXTR]], half %a)
// CHECK: ret half [[FMA]]
float16_t test_vfmsh_laneq_f16(float16_t a, float16_t b, float16x8_t c) {
  return vfmsh_laneq_f16(a, b, c, 7);
}

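// Vector multiply-by-lane splats the chosen lane and emits a plain fmul;
// the vmulx variants call the aarch64.neon.fmulx intrinsic instead.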
// CHECK-LABEL: test_vmul_lane_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP1]], <4 x half> [[TMP1]], <4 x i32> <i32 3, i32 3, i32 3, i32 3>
// CHECK: [[MUL:%.*]] = fmul <4 x half> [[A:%.*]], [[LANE]]
// CHECK: ret <4 x half> [[MUL]]
float16x4_t test_vmul_lane_f16(float16x4_t a, float16x4_t b) {
  return vmul_lane_f16(a, b, 3);
}

// CHECK-LABEL: test_vmulq_lane_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP1]], <4 x half> [[TMP1]], <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
// CHECK: [[MUL:%.*]] = fmul <8 x half> [[A:%.*]], [[LANE]]
// CHECK: ret <8 x half> [[MUL]]
float16x8_t test_vmulq_lane_f16(float16x8_t a, float16x4_t b) {
  return vmulq_lane_f16(a, b, 3);
}

// CHECK-LABEL: test_vmul_laneq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP1]], <8 x half> [[TMP1]], <4 x i32> <i32 7, i32 7, i32 7, i32 7>
// CHECK: [[MUL:%.*]] = fmul <4 x half> [[A:%.*]], [[LANE]]
// CHECK: ret <4 x half> [[MUL]]
float16x4_t test_vmul_laneq_f16(float16x4_t a, float16x8_t b) {
  return vmul_laneq_f16(a, b, 7);
}

// CHECK-LABEL: test_vmulq_laneq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP1]], <8 x half> [[TMP1]], <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
// CHECK: [[MUL:%.*]] = fmul <8 x half> [[A:%.*]], [[LANE]]
// CHECK: ret <8 x half> [[MUL]]
float16x8_t test_vmulq_laneq_f16(float16x8_t a, float16x8_t b) {
  return vmulq_laneq_f16(a, b, 7);
}

// CHECK-LABEL: test_vmul_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <4 x half> undef, half %b, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half %b, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %b, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %b, i32 3
// CHECK: [[MUL:%.*]] = fmul <4 x half> %a, [[TMP3]]
// CHECK: ret <4 x half> [[MUL]]
float16x4_t test_vmul_n_f16(float16x4_t a, float16_t b) {
  return vmul_n_f16(a, b);
}

// CHECK-LABEL: test_vmulq_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <8 x half> undef, half %b, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half %b, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half %b, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half %b, i32 3
// CHECK: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half %b, i32 4
// CHECK: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half %b, i32 5
// CHECK: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %b, i32 6
// CHECK: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %b, i32 7
// CHECK: [[MUL:%.*]] = fmul <8 x half> %a, [[TMP7]]
// CHECK: ret <8 x half> [[MUL]]
float16x8_t test_vmulq_n_f16(float16x8_t a, float16_t b) {
  return vmulq_n_f16(a, b);
}

// FIXME: Fix it when fp16 non-storage-only type becomes available.
// CHECK-LABEL: test_vmulh_lane_f16
// CHECK: [[CONV0:%.*]] = fpext half %a to float
// CHECK: [[CONV1:%.*]] = fpext half %{{.*}} to float
// CHECK: [[MUL:%.*]] = fmul float [[CONV0]], [[CONV1]]
// CHECK: [[CONV3:%.*]] = fptrunc float [[MUL]] to half
// CHECK: ret half [[CONV3]]
float16_t test_vmulh_lane_f16(float16_t a, float16x4_t b) {
  return vmulh_lane_f16(a, b, 3);
}

// CHECK-LABEL: test_vmulh_laneq_f16
// CHECK: [[CONV0:%.*]] = fpext half %a to float
// CHECK: [[CONV1:%.*]] = fpext half %{{.*}} to float
// CHECK: [[MUL:%.*]] = fmul float [[CONV0]], [[CONV1]]
// CHECK: [[CONV3:%.*]] = fptrunc float [[MUL]] to half
// CHECK: ret half [[CONV3]]
float16_t test_vmulh_laneq_f16(float16_t a, float16x8_t b) {
  return vmulh_laneq_f16(a, b, 7);
}

// CHECK-LABEL: test_vmulx_lane_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP1]], <4 x half> [[TMP1]], <4 x i32> <i32 3, i32 3, i32 3, i32 3>
// CHECK: [[TMP2:%.*]] = bitcast <4 x half> [[A:%.*]] to <8 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <4 x half> [[LANE]] to <8 x i8>
// CHECK: [[VMULX2_I:%.*]] = call <4 x half> @llvm.aarch64.neon.fmulx.v4f16(<4 x half> [[A]], <4 x half> [[LANE]]) #4
// CHECK: ret <4 x half> [[VMULX2_I]]
float16x4_t test_vmulx_lane_f16(float16x4_t a, float16x4_t b) {
  return vmulx_lane_f16(a, b, 3);
}

// CHECK-LABEL: test_vmulxq_lane_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP1]], <4 x half> [[TMP1]], <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
// CHECK: [[TMP2:%.*]] = bitcast <8 x half> [[A:%.*]] to <16 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x half> [[LANE]] to <16 x i8>
// CHECK: [[VMULX2_I:%.*]] = call <8 x half> @llvm.aarch64.neon.fmulx.v8f16(<8 x half> [[A]], <8 x half> [[LANE]]) #4
// CHECK: ret <8 x half> [[VMULX2_I]]
float16x8_t test_vmulxq_lane_f16(float16x8_t a, float16x4_t b) {
  return vmulxq_lane_f16(a, b, 3);
}

// CHECK-LABEL: test_vmulx_laneq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP1]], <8 x half> [[TMP1]], <4 x i32> <i32 7, i32 7, i32 7, i32 7>
// CHECK: [[TMP2:%.*]] = bitcast <4 x half> [[A:%.*]] to <8 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <4 x half> [[LANE]] to <8 x i8>
// CHECK: [[VMULX2_I:%.*]] = call <4 x half> @llvm.aarch64.neon.fmulx.v4f16(<4 x half> [[A]], <4 x half> [[LANE]]) #4
// CHECK: ret <4 x half> [[VMULX2_I]]
float16x4_t test_vmulx_laneq_f16(float16x4_t a, float16x8_t b) {
  return vmulx_laneq_f16(a, b, 7);
}

// CHECK-LABEL: test_vmulxq_laneq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[LANE:%.*]] = shufflevector <8 x half> [[TMP1]], <8 x half> [[TMP1]], <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
// CHECK: [[TMP2:%.*]] = bitcast <8 x half> [[A:%.*]] to <16 x i8>
// CHECK: [[TMP3:%.*]] = bitcast <8 x half> [[LANE]] to <16 x i8>
// CHECK: [[VMULX2_I:%.*]] = call <8 x half> @llvm.aarch64.neon.fmulx.v8f16(<8 x half> [[A]], <8 x half> [[LANE]]) #4
// CHECK: ret <8 x half> [[VMULX2_I]]
float16x8_t test_vmulxq_laneq_f16(float16x8_t a, float16x8_t b) {
  return vmulxq_laneq_f16(a, b, 7);
}

// CHECK-LABEL: test_vmulx_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <4 x half> undef, half %b, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half %b, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %b, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %b, i32 3
// CHECK: [[MUL:%.*]] = call <4 x half> @llvm.aarch64.neon.fmulx.v4f16(<4 x half> %a, <4 x half> [[TMP3]])
// CHECK: ret <4 x half> [[MUL]]
float16x4_t test_vmulx_n_f16(float16x4_t a, float16_t b) {
  return vmulx_n_f16(a, b);
}

// CHECK-LABEL: test_vmulxq_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <8 x half> undef, half %b, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half %b, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half %b, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half %b, i32 3
// CHECK: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half %b, i32 4
// CHECK: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half %b, i32 5
// CHECK: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %b, i32 6
// CHECK: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %b, i32 7
// CHECK: [[MUL:%.*]] = call <8 x half> @llvm.aarch64.neon.fmulx.v8f16(<8 x half> %a, <8 x half> [[TMP7]])
// CHECK: ret <8 x half> [[MUL]]
float16x8_t test_vmulxq_n_f16(float16x8_t a, float16_t b) {
  return vmulxq_n_f16(a, b);
}

// CHECK-LABEL: test_vmulxh_lane_f16
// CHECK: [[EXTR:%.*]] = extractelement <4 x half> %b, i32 3
// CHECK: [[MULX:%.*]] = call half @llvm.aarch64.neon.fmulx.f16(half %a, half [[EXTR]])
// CHECK: ret half [[MULX]]
float16_t test_vmulxh_lane_f16(float16_t a, float16x4_t b) {
  return vmulxh_lane_f16(a, b, 3);
}

// CHECK-LABEL: test_vmulxh_laneq_f16
// CHECK: [[EXTR:%.*]] = extractelement <8 x half> %b, i32 7
// CHECK: [[MULX:%.*]] = call half @llvm.aarch64.neon.fmulx.f16(half %a, half [[EXTR]])
// CHECK: ret half [[MULX]]
float16_t test_vmulxh_laneq_f16(float16_t a, float16x8_t b) {
  return vmulxh_laneq_f16(a, b, 7);
}

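// The following across-vector reductions fold every lane into one scalar
// half. fmaxv/fminv follow FMAX/FMIN semantics, so a NaN in any lane
// produces a NaN result.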
// CHECK-LABEL: test_vmaxv_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[MAX:%.*]] = call half @llvm.aarch64.neon.fmaxv.f16.v4f16(<4 x half> [[TMP1]])
// CHECK: ret half [[MAX]]
float16_t test_vmaxv_f16(float16x4_t a) {
  return vmaxv_f16(a);
}

// CHECK-LABEL: test_vmaxvq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[MAX:%.*]] = call half @llvm.aarch64.neon.fmaxv.f16.v8f16(<8 x half> [[TMP1]])
// CHECK: ret half [[MAX]]
float16_t test_vmaxvq_f16(float16x8_t a) {
  return vmaxvq_f16(a);
}

// CHECK-LABEL: test_vminv_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[MIN:%.*]] = call half @llvm.aarch64.neon.fminv.f16.v4f16(<4 x half> [[TMP1]])
// CHECK: ret half [[MIN]]
float16_t test_vminv_f16(float16x4_t a) {
  return vminv_f16(a);
}

// CHECK-LABEL: test_vminvq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[MIN:%.*]] = call half @llvm.aarch64.neon.fminv.f16.v8f16(<8 x half> [[TMP1]])
// CHECK: ret half [[MIN]]
float16_t test_vminvq_f16(float16x8_t a) {
  return vminvq_f16(a);
}

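// The maxnmv/minnmv variants use FMAXNMV/FMINNMV instead, i.e. IEEE 754-2008
// maxNum/minNum semantics: a quiet NaN lane loses to a numeric lane.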
// CHECK-LABEL: test_vmaxnmv_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[MAX:%.*]] = call half @llvm.aarch64.neon.fmaxnmv.f16.v4f16(<4 x half> [[TMP1]])
// CHECK: ret half [[MAX]]
float16_t test_vmaxnmv_f16(float16x4_t a) {
  return vmaxnmv_f16(a);
}

// CHECK-LABEL: test_vmaxnmvq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[MAX:%.*]] = call half @llvm.aarch64.neon.fmaxnmv.f16.v8f16(<8 x half> [[TMP1]])
// CHECK: ret half [[MAX]]
float16_t test_vmaxnmvq_f16(float16x8_t a) {
  return vmaxnmvq_f16(a);
}

// CHECK-LABEL: test_vminnmv_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[MIN:%.*]] = call half @llvm.aarch64.neon.fminnmv.f16.v4f16(<4 x half> [[TMP1]])
// CHECK: ret half [[MIN]]
float16_t test_vminnmv_f16(float16x4_t a) {
  return vminnmv_f16(a);
}

// CHECK-LABEL: test_vminnmvq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[MIN:%.*]] = call half @llvm.aarch64.neon.fminnmv.f16.v8f16(<8 x half> [[TMP1]])
// CHECK: ret half [[MIN]]
float16_t test_vminnmvq_f16(float16x8_t a) {
  return vminnmvq_f16(a);
}

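// vbsl is a purely bitwise lane select, which is why codegen bitcasts the
// half vectors to i16 vectors and computes (mask & b) | (~mask & c). A
// scalar sketch of the per-lane operation (hypothetical helper, not part of
// this test):
//
//   static inline uint16_t bsl16(uint16_t mask, uint16_t x, uint16_t y) {
//     return (uint16_t)((mask & x) | (~mask & y));
//   }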
// CHECK-LABEL: test_vbsl_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %b to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %c to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK: [[TMP4:%.*]] = and <4 x i16> %a, [[TMP2]]
// CHECK: [[TMP5:%.*]] = xor <4 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK: [[TMP6:%.*]] = and <4 x i16> [[TMP5]], [[TMP3]]
// CHECK: [[TMP7:%.*]] = or <4 x i16> [[TMP4]], [[TMP6]]
// CHECK: [[TMP8:%.*]] = bitcast <4 x i16> [[TMP7]] to <4 x half>
// CHECK: ret <4 x half> [[TMP8]]
float16x4_t test_vbsl_f16(uint16x4_t a, float16x4_t b, float16x4_t c) {
  return vbsl_f16(a, b, c);
}

// CHECK-LABEL: test_vbslq_f16
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %b to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %c to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
// CHECK: [[TMP4:%.*]] = and <8 x i16> %a, [[TMP2]]
// CHECK: [[TMP5:%.*]] = xor <8 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK: [[TMP6:%.*]] = and <8 x i16> [[TMP5]], [[TMP3]]
// CHECK: [[TMP7:%.*]] = or <8 x i16> [[TMP4]], [[TMP6]]
// CHECK: [[TMP8:%.*]] = bitcast <8 x i16> [[TMP7]] to <8 x half>
// CHECK: ret <8 x half> [[TMP8]]
float16x8_t test_vbslq_f16(uint16x8_t a, float16x8_t b, float16x8_t c) {
  return vbslq_f16(a, b, c);
}

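// vzip/vuzp/vtrn return a two-vector struct (float16x4x2_t / float16x8x2_t),
// so the IR below stores both shuffle results through a return-value alloca.
// vzip interleaves: result 0 alternates the low lanes of a and b, result 1
// the high lanes.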
// CHECK-LABEL: test_vzip_f16
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK: [[__RET_I:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <4 x half>*
// CHECK: [[VZIP0_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
// CHECK: store <4 x half> [[VZIP0_I]], <4 x half>* [[TMP1]]
// CHECK: [[TMP2:%.*]] = getelementptr inbounds <4 x half>, <4 x half>* [[TMP1]], i32 1
// CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
// CHECK: store <4 x half> [[VZIP1_I]], <4 x half>* [[TMP2]]
float16x4x2_t test_vzip_f16(float16x4_t a, float16x4_t b) {
  return vzip_f16(a, b);
}

// CHECK-LABEL: test_vzipq_f16
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[__RET_I:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x half>*
// CHECK: [[VZIP0_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
// CHECK: store <8 x half> [[VZIP0_I]], <8 x half>* [[TMP1]]
// CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x half>, <8 x half>* [[TMP1]], i32 1
// CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
// CHECK: store <8 x half> [[VZIP1_I]], <8 x half>* [[TMP2]]
float16x8x2_t test_vzipq_f16(float16x8_t a, float16x8_t b) {
  return vzipq_f16(a, b);
}

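// vuzp de-interleaves: result 0 gathers the even-indexed lanes of the
// concatenation of a and b, result 1 the odd-indexed lanes.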
// CHECK-LABEL: test_vuzp_f16
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK: [[__RET_I:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <4 x half>*
// CHECK: [[VUZP0_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
// CHECK: store <4 x half> [[VUZP0_I]], <4 x half>* [[TMP1]]
// CHECK: [[TMP2:%.*]] = getelementptr inbounds <4 x half>, <4 x half>* [[TMP1]], i32 1
// CHECK: [[VUZP1_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
// CHECK: store <4 x half> [[VUZP1_I]], <4 x half>* [[TMP2]]
float16x4x2_t test_vuzp_f16(float16x4_t a, float16x4_t b) {
  return vuzp_f16(a, b);
}

// CHECK-LABEL: test_vuzpq_f16
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[__RET_I:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x half>*
// CHECK: [[VUZP0_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
// CHECK: store <8 x half> [[VUZP0_I]], <8 x half>* [[TMP1]]
// CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x half>, <8 x half>* [[TMP1]], i32 1
// CHECK: [[VUZP1_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
// CHECK: store <8 x half> [[VUZP1_I]], <8 x half>* [[TMP2]]
float16x8x2_t test_vuzpq_f16(float16x8_t a, float16x8_t b) {
  return vuzpq_f16(a, b);
}

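// vtrn transposes 2x2 blocks: result 0 pairs the even-indexed lanes of a
// with those of b, result 1 the odd-indexed lanes.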
// CHECK-LABEL: test_vtrn_f16
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK: [[__RET_I:%.*]] = alloca %struct.float16x4x2_t, align 8
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <4 x half>*
// CHECK: [[VTRN0_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
// CHECK: store <4 x half> [[VTRN0_I]], <4 x half>* [[TMP1]]
// CHECK: [[TMP2:%.*]] = getelementptr inbounds <4 x half>, <4 x half>* [[TMP1]], i32 1
// CHECK: [[VTRN1_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
// CHECK: store <4 x half> [[VTRN1_I]], <4 x half>* [[TMP2]]
float16x4x2_t test_vtrn_f16(float16x4_t a, float16x4_t b) {
  return vtrn_f16(a, b);
}

// CHECK-LABEL: test_vtrnq_f16
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[__RET_I:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x8x2_t* [[RETVAL]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x half>*
// CHECK: [[VTRN0_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
// CHECK: store <8 x half> [[VTRN0_I]], <8 x half>* [[TMP1]]
// CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x half>, <8 x half>* [[TMP1]], i32 1
// CHECK: [[VTRN1_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
// CHECK: store <8 x half> [[VTRN1_I]], <8 x half>* [[TMP2]]
float16x8x2_t test_vtrnq_f16(float16x8_t a, float16x8_t b) {
  return vtrnq_f16(a, b);
}

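// vmov_n/vdup_n broadcast one scalar half into every lane; without
// optimization this appears as a chain of insertelement instructions.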
// CHECK-LABEL: test_vmov_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <4 x half> undef, half %a, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half %a, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %a, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %a, i32 3
// CHECK: ret <4 x half> [[TMP3]]
float16x4_t test_vmov_n_f16(float16_t a) {
  return vmov_n_f16(a);
}

// CHECK-LABEL: test_vmovq_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <8 x half> undef, half %a, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half %a, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half %a, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half %a, i32 3
// CHECK: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half %a, i32 4
// CHECK: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half %a, i32 5
// CHECK: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %a, i32 6
// CHECK: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %a, i32 7
// CHECK: ret <8 x half> [[TMP7]]
float16x8_t test_vmovq_n_f16(float16_t a) {
  return vmovq_n_f16(a);
}

// CHECK-LABEL: test_vdup_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <4 x half> undef, half %a, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <4 x half> [[TMP0]], half %a, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %a, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %a, i32 3
// CHECK: ret <4 x half> [[TMP3]]
float16x4_t test_vdup_n_f16(float16_t a) {
  return vdup_n_f16(a);
}

// CHECK-LABEL: test_vdupq_n_f16
// CHECK: [[TMP0:%.*]] = insertelement <8 x half> undef, half %a, i32 0
// CHECK: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half %a, i32 1
// CHECK: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half %a, i32 2
// CHECK: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half %a, i32 3
// CHECK: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half %a, i32 4
// CHECK: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half %a, i32 5
// CHECK: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %a, i32 6
// CHECK: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %a, i32 7
// CHECK: ret <8 x half> [[TMP7]]
float16x8_t test_vdupq_n_f16(float16_t a) {
  return vdupq_n_f16(a);
}

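// vdup_lane broadcasts a single source lane to every result lane, i.e. a
// shufflevector whose mask repeats the selected index.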
// CHECK-LABEL: test_vdup_lane_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> [[A:%.*]] to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP1]], <4 x half> [[TMP1]], <4 x i32> <i32 3, i32 3, i32 3, i32 3>
// CHECK: ret <4 x half> [[LANE]]
float16x4_t test_vdup_lane_f16(float16x4_t a) {
  return vdup_lane_f16(a, 3);
}

// CHECK-LABEL: test_vdupq_lane_f16
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> [[A:%.*]] to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[LANE:%.*]] = shufflevector <4 x half> [[TMP1]], <4 x half> [[TMP1]], <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
// CHECK: ret <8 x half> [[LANE]]
float16x8_t test_vdupq_lane_f16(float16x4_t a) {
  return vdupq_lane_f16(a, 3);
}

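// vext extracts a vector from the concatenation of its operands: lane i of
// the result is a[i+n] while that index stays in range, then continues into
// b. For the 4-lane case with n == 2, informally:
//
//   r[i] = (i + 2 < 4) ? a[i + 2] : b[i + 2 - 4];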
// CHECK-LABEL: @test_vext_f16(
// CHECK: [[TMP0:%.*]] = bitcast <4 x half> %a to <8 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
// CHECK: [[VEXT:%.*]] = shufflevector <4 x half> [[TMP2]], <4 x half> [[TMP3]], <4 x i32> <i32 2, i32 3, i32 4, i32 5>
// CHECK: ret <4 x half> [[VEXT]]
float16x4_t test_vext_f16(float16x4_t a, float16x4_t b) {
  return vext_f16(a, b, 2);
}

// CHECK-LABEL: @test_vextq_f16(
// CHECK: [[TMP0:%.*]] = bitcast <8 x half> %a to <16 x i8>
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
// CHECK: [[VEXT:%.*]] = shufflevector <8 x half> [[TMP2]], <8 x half> [[TMP3]], <8 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12>
// CHECK: ret <8 x half> [[VEXT]]
float16x8_t test_vextq_f16(float16x8_t a, float16x8_t b) {
  return vextq_f16(a, b, 5);
}

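// vrev64 reverses the element order within each 64-bit doubleword, so the
// q-form reverses its two doublewords independently.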
// CHECK-LABEL: @test_vrev64_f16(
// CHECK: [[SHFL:%.*]] = shufflevector <4 x half> %a, <4 x half> %a, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// CHECK: ret <4 x half> [[SHFL]]
float16x4_t test_vrev64_f16(float16x4_t a) {
  return vrev64_f16(a);
}

// CHECK-LABEL: @test_vrev64q_f16(
// CHECK: [[SHFL:%.*]] = shufflevector <8 x half> %a, <8 x half> %a, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK: ret <8 x half> [[SHFL]]
float16x8_t test_vrev64q_f16(float16x8_t a) {
  return vrev64q_f16(a);
}

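// The AArch64-only zip1/zip2, uzp1/uzp2 and trn1/trn2 intrinsics return one
// half of the corresponding two-result permute directly, so each lowers to a
// single shufflevector with no struct return.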
// CHECK-LABEL: @test_vzip1_f16(
// CHECK: [[SHFL:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
// CHECK: ret <4 x half> [[SHFL]]
float16x4_t test_vzip1_f16(float16x4_t a, float16x4_t b) {
  return vzip1_f16(a, b);
}

// CHECK-LABEL: @test_vzip1q_f16(
// CHECK: [[SHFL:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
// CHECK: ret <8 x half> [[SHFL]]
float16x8_t test_vzip1q_f16(float16x8_t a, float16x8_t b) {
  return vzip1q_f16(a, b);
}

// CHECK-LABEL: @test_vzip2_f16(
// CHECK: [[SHFL:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
// CHECK: ret <4 x half> [[SHFL]]
float16x4_t test_vzip2_f16(float16x4_t a, float16x4_t b) {
  return vzip2_f16(a, b);
}

// CHECK-LABEL: @test_vzip2q_f16(
// CHECK: [[SHFL:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
// CHECK: ret <8 x half> [[SHFL]]
float16x8_t test_vzip2q_f16(float16x8_t a, float16x8_t b) {
  return vzip2q_f16(a, b);
}

// CHECK-LABEL: @test_vuzp1_f16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
// CHECK: ret <4 x half> [[SHUFFLE_I]]
float16x4_t test_vuzp1_f16(float16x4_t a, float16x4_t b) {
  return vuzp1_f16(a, b);
}

// CHECK-LABEL: @test_vuzp1q_f16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
// CHECK: ret <8 x half> [[SHUFFLE_I]]
float16x8_t test_vuzp1q_f16(float16x8_t a, float16x8_t b) {
  return vuzp1q_f16(a, b);
}

// CHECK-LABEL: @test_vuzp2_f16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
// CHECK: ret <4 x half> [[SHUFFLE_I]]
float16x4_t test_vuzp2_f16(float16x4_t a, float16x4_t b) {
  return vuzp2_f16(a, b);
}

// CHECK-LABEL: @test_vuzp2q_f16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
// CHECK: ret <8 x half> [[SHUFFLE_I]]
float16x8_t test_vuzp2q_f16(float16x8_t a, float16x8_t b) {
  return vuzp2q_f16(a, b);
}

// CHECK-LABEL: @test_vtrn1_f16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
// CHECK: ret <4 x half> [[SHUFFLE_I]]
float16x4_t test_vtrn1_f16(float16x4_t a, float16x4_t b) {
  return vtrn1_f16(a, b);
}

// CHECK-LABEL: @test_vtrn1q_f16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
// CHECK: ret <8 x half> [[SHUFFLE_I]]
float16x8_t test_vtrn1q_f16(float16x8_t a, float16x8_t b) {
  return vtrn1q_f16(a, b);
}

// CHECK-LABEL: @test_vtrn2_f16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
// CHECK: ret <4 x half> [[SHUFFLE_I]]
float16x4_t test_vtrn2_f16(float16x4_t a, float16x4_t b) {
  return vtrn2_f16(a, b);
}

// CHECK-LABEL: @test_vtrn2q_f16(
// CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
// CHECK: ret <8 x half> [[SHUFFLE_I]]
float16x8_t test_vtrn2q_f16(float16x8_t a, float16x8_t b) {
  return vtrn2q_f16(a, b);
}

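// vduph_lane/vduph_laneq read one lane out as a scalar half, which is just
// an extractelement.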
// CHECK-LABEL: @test_vduph_laneq_f16(
// CHECK: [[V:%.*]] = extractelement <8 x half> [[V2:%.*]], i32 7
// CHECK-NEXT: ret half [[V]]
float16_t test_vduph_laneq_f16(float16x8_t vec) {
  return vduph_laneq_f16(vec, 7);
}

// CHECK-LABEL: @test_vduph_lane_f16(
// CHECK: [[V:%.*]] = extractelement <4 x half> [[V2:%.*]], i32 3
// CHECK-NEXT: ret half [[V]]
float16_t test_vduph_lane_f16(float16x4_t vec) {
  return vduph_lane_f16(vec, 3);
}