// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s

#include <arm_mve.h>
6
7 // CHECK-LABEL: @test_vabsq_f16(
8 // CHECK-NEXT: entry:
9 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.fabs.v8f16(<8 x half> [[A:%.*]])
10 // CHECK-NEXT: ret <8 x half> [[TMP0]]
11 //
test_vabsq_f16(float16x8_t a)12 float16x8_t test_vabsq_f16(float16x8_t a)
13 {
14 #ifdef POLYMORPHIC
15 return vabsq(a);
16 #else /* POLYMORPHIC */
17 return vabsq_f16(a);
18 #endif /* POLYMORPHIC */
19 }
20
21 // CHECK-LABEL: @test_vabsq_f32(
22 // CHECK-NEXT: entry:
23 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.fabs.v4f32(<4 x float> [[A:%.*]])
24 // CHECK-NEXT: ret <4 x float> [[TMP0]]
25 //
test_vabsq_f32(float32x4_t a)26 float32x4_t test_vabsq_f32(float32x4_t a)
27 {
28 #ifdef POLYMORPHIC
29 return vabsq(a);
30 #else /* POLYMORPHIC */
31 return vabsq_f32(a);
32 #endif /* POLYMORPHIC */
33 }
34
35 // CHECK-LABEL: @test_vabsq_s8(
36 // CHECK-NEXT: entry:
37 // CHECK-NEXT: [[TMP0:%.*]] = icmp slt <16 x i8> [[A:%.*]], zeroinitializer
38 // CHECK-NEXT: [[TMP1:%.*]] = sub <16 x i8> zeroinitializer, [[A]]
39 // CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[A]]
40 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
41 //
test_vabsq_s8(int8x16_t a)42 int8x16_t test_vabsq_s8(int8x16_t a)
43 {
44 #ifdef POLYMORPHIC
45 return vabsq(a);
46 #else /* POLYMORPHIC */
47 return vabsq_s8(a);
48 #endif /* POLYMORPHIC */
49 }
50
51 // CHECK-LABEL: @test_vabsq_s16(
52 // CHECK-NEXT: entry:
53 // CHECK-NEXT: [[TMP0:%.*]] = icmp slt <8 x i16> [[A:%.*]], zeroinitializer
54 // CHECK-NEXT: [[TMP1:%.*]] = sub <8 x i16> zeroinitializer, [[A]]
55 // CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> [[TMP1]], <8 x i16> [[A]]
56 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
57 //
test_vabsq_s16(int16x8_t a)58 int16x8_t test_vabsq_s16(int16x8_t a)
59 {
60 #ifdef POLYMORPHIC
61 return vabsq(a);
62 #else /* POLYMORPHIC */
63 return vabsq_s16(a);
64 #endif /* POLYMORPHIC */
65 }
66
67 // CHECK-LABEL: @test_vabsq_s32(
68 // CHECK-NEXT: entry:
69 // CHECK-NEXT: [[TMP0:%.*]] = icmp slt <4 x i32> [[A:%.*]], zeroinitializer
70 // CHECK-NEXT: [[TMP1:%.*]] = sub <4 x i32> zeroinitializer, [[A]]
71 // CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP1]], <4 x i32> [[A]]
72 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
73 //
test_vabsq_s32(int32x4_t a)74 int32x4_t test_vabsq_s32(int32x4_t a)
75 {
76 #ifdef POLYMORPHIC
77 return vabsq(a);
78 #else /* POLYMORPHIC */
79 return vabsq_s32(a);
80 #endif /* POLYMORPHIC */
81 }
82
83 // CHECK-LABEL: @test_vmvnq_s8(
84 // CHECK-NEXT: entry:
85 // CHECK-NEXT: [[TMP0:%.*]] = xor <16 x i8> [[A:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
86 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
87 //
test_vmvnq_s8(int8x16_t a)88 int8x16_t test_vmvnq_s8(int8x16_t a)
89 {
90 #ifdef POLYMORPHIC
91 return vmvnq(a);
92 #else /* POLYMORPHIC */
93 return vmvnq_s8(a);
94 #endif /* POLYMORPHIC */
95 }
96
97 // CHECK-LABEL: @test_vmvnq_s16(
98 // CHECK-NEXT: entry:
99 // CHECK-NEXT: [[TMP0:%.*]] = xor <8 x i16> [[A:%.*]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
100 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
101 //
test_vmvnq_s16(int16x8_t a)102 int16x8_t test_vmvnq_s16(int16x8_t a)
103 {
104 #ifdef POLYMORPHIC
105 return vmvnq(a);
106 #else /* POLYMORPHIC */
107 return vmvnq_s16(a);
108 #endif /* POLYMORPHIC */
109 }
110
111 // CHECK-LABEL: @test_vmvnq_s32(
112 // CHECK-NEXT: entry:
113 // CHECK-NEXT: [[TMP0:%.*]] = xor <4 x i32> [[A:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1>
114 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
115 //
test_vmvnq_s32(int32x4_t a)116 int32x4_t test_vmvnq_s32(int32x4_t a)
117 {
118 #ifdef POLYMORPHIC
119 return vmvnq(a);
120 #else /* POLYMORPHIC */
121 return vmvnq_s32(a);
122 #endif /* POLYMORPHIC */
123 }
124
125 // CHECK-LABEL: @test_vmvnq_u8(
126 // CHECK-NEXT: entry:
127 // CHECK-NEXT: [[TMP0:%.*]] = xor <16 x i8> [[A:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
128 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
129 //
test_vmvnq_u8(uint8x16_t a)130 uint8x16_t test_vmvnq_u8(uint8x16_t a)
131 {
132 #ifdef POLYMORPHIC
133 return vmvnq(a);
134 #else /* POLYMORPHIC */
135 return vmvnq_u8(a);
136 #endif /* POLYMORPHIC */
137 }
138
139 // CHECK-LABEL: @test_vmvnq_u16(
140 // CHECK-NEXT: entry:
141 // CHECK-NEXT: [[TMP0:%.*]] = xor <8 x i16> [[A:%.*]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
142 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
143 //
test_vmvnq_u16(uint16x8_t a)144 uint16x8_t test_vmvnq_u16(uint16x8_t a)
145 {
146 #ifdef POLYMORPHIC
147 return vmvnq(a);
148 #else /* POLYMORPHIC */
149 return vmvnq_u16(a);
150 #endif /* POLYMORPHIC */
151 }
152
153 // CHECK-LABEL: @test_vmvnq_u32(
154 // CHECK-NEXT: entry:
155 // CHECK-NEXT: [[TMP0:%.*]] = xor <4 x i32> [[A:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1>
156 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
157 //
test_vmvnq_u32(uint32x4_t a)158 uint32x4_t test_vmvnq_u32(uint32x4_t a)
159 {
160 #ifdef POLYMORPHIC
161 return vmvnq(a);
162 #else /* POLYMORPHIC */
163 return vmvnq_u32(a);
164 #endif /* POLYMORPHIC */
165 }
166
167 // CHECK-LABEL: @test_vmvnq_m_s8(
168 // CHECK-NEXT: entry:
169 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
170 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
171 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.mvn.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
172 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
173 //
test_vmvnq_m_s8(int8x16_t inactive,int8x16_t a,mve_pred16_t p)174 int8x16_t test_vmvnq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
175 {
176 #ifdef POLYMORPHIC
177 return vmvnq_m(inactive, a, p);
178 #else /* POLYMORPHIC */
179 return vmvnq_m_s8(inactive, a, p);
180 #endif /* POLYMORPHIC */
181 }
182
183 // CHECK-LABEL: @test_vmvnq_m_s16(
184 // CHECK-NEXT: entry:
185 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
186 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
187 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.mvn.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
188 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
189 //
test_vmvnq_m_s16(int16x8_t inactive,int16x8_t a,mve_pred16_t p)190 int16x8_t test_vmvnq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
191 {
192 #ifdef POLYMORPHIC
193 return vmvnq_m(inactive, a, p);
194 #else /* POLYMORPHIC */
195 return vmvnq_m_s16(inactive, a, p);
196 #endif /* POLYMORPHIC */
197 }
198
199 // CHECK-LABEL: @test_vmvnq_m_s32(
200 // CHECK-NEXT: entry:
201 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
202 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
203 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.mvn.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
204 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
205 //
test_vmvnq_m_s32(int32x4_t inactive,int32x4_t a,mve_pred16_t p)206 int32x4_t test_vmvnq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
207 {
208 #ifdef POLYMORPHIC
209 return vmvnq_m(inactive, a, p);
210 #else /* POLYMORPHIC */
211 return vmvnq_m_s32(inactive, a, p);
212 #endif /* POLYMORPHIC */
213 }
214
215 // CHECK-LABEL: @test_vmvnq_m_u8(
216 // CHECK-NEXT: entry:
217 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
218 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
219 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.mvn.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
220 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
221 //
test_vmvnq_m_u8(uint8x16_t inactive,uint8x16_t a,mve_pred16_t p)222 uint8x16_t test_vmvnq_m_u8(uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
223 {
224 #ifdef POLYMORPHIC
225 return vmvnq_m(inactive, a, p);
226 #else /* POLYMORPHIC */
227 return vmvnq_m_u8(inactive, a, p);
228 #endif /* POLYMORPHIC */
229 }
230
231 // CHECK-LABEL: @test_vmvnq_m_u16(
232 // CHECK-NEXT: entry:
233 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
234 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
235 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.mvn.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
236 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
237 //
test_vmvnq_m_u16(uint16x8_t inactive,uint16x8_t a,mve_pred16_t p)238 uint16x8_t test_vmvnq_m_u16(uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
239 {
240 #ifdef POLYMORPHIC
241 return vmvnq_m(inactive, a, p);
242 #else /* POLYMORPHIC */
243 return vmvnq_m_u16(inactive, a, p);
244 #endif /* POLYMORPHIC */
245 }
246
247 // CHECK-LABEL: @test_vmvnq_m_u32(
248 // CHECK-NEXT: entry:
249 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
250 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
251 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.mvn.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
252 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
253 //
test_vmvnq_m_u32(uint32x4_t inactive,uint32x4_t a,mve_pred16_t p)254 uint32x4_t test_vmvnq_m_u32(uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
255 {
256 #ifdef POLYMORPHIC
257 return vmvnq_m(inactive, a, p);
258 #else /* POLYMORPHIC */
259 return vmvnq_m_u32(inactive, a, p);
260 #endif /* POLYMORPHIC */
261 }
262
263 // CHECK-LABEL: @test_vmvnq_x_s8(
264 // CHECK-NEXT: entry:
265 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
266 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
267 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.mvn.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> undef)
268 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
269 //
test_vmvnq_x_s8(int8x16_t a,mve_pred16_t p)270 int8x16_t test_vmvnq_x_s8(int8x16_t a, mve_pred16_t p)
271 {
272 #ifdef POLYMORPHIC
273 return vmvnq_x(a, p);
274 #else /* POLYMORPHIC */
275 return vmvnq_x_s8(a, p);
276 #endif /* POLYMORPHIC */
277 }
278
279 // CHECK-LABEL: @test_vmvnq_x_s16(
280 // CHECK-NEXT: entry:
281 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
282 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
283 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.mvn.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> undef)
284 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
285 //
test_vmvnq_x_s16(int16x8_t a,mve_pred16_t p)286 int16x8_t test_vmvnq_x_s16(int16x8_t a, mve_pred16_t p)
287 {
288 #ifdef POLYMORPHIC
289 return vmvnq_x(a, p);
290 #else /* POLYMORPHIC */
291 return vmvnq_x_s16(a, p);
292 #endif /* POLYMORPHIC */
293 }
294
295 // CHECK-LABEL: @test_vmvnq_x_s32(
296 // CHECK-NEXT: entry:
297 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
298 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
299 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.mvn.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> undef)
300 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
301 //
test_vmvnq_x_s32(int32x4_t a,mve_pred16_t p)302 int32x4_t test_vmvnq_x_s32(int32x4_t a, mve_pred16_t p)
303 {
304 #ifdef POLYMORPHIC
305 return vmvnq_x(a, p);
306 #else /* POLYMORPHIC */
307 return vmvnq_x_s32(a, p);
308 #endif /* POLYMORPHIC */
309 }
310
311 // CHECK-LABEL: @test_vmvnq_x_u8(
312 // CHECK-NEXT: entry:
313 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
314 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
315 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.mvn.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> undef)
316 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
317 //
test_vmvnq_x_u8(uint8x16_t a,mve_pred16_t p)318 uint8x16_t test_vmvnq_x_u8(uint8x16_t a, mve_pred16_t p)
319 {
320 #ifdef POLYMORPHIC
321 return vmvnq_x(a, p);
322 #else /* POLYMORPHIC */
323 return vmvnq_x_u8(a, p);
324 #endif /* POLYMORPHIC */
325 }
326
327 // CHECK-LABEL: @test_vmvnq_x_u16(
328 // CHECK-NEXT: entry:
329 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
330 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
331 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.mvn.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> undef)
332 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
333 //
test_vmvnq_x_u16(uint16x8_t a,mve_pred16_t p)334 uint16x8_t test_vmvnq_x_u16(uint16x8_t a, mve_pred16_t p)
335 {
336 #ifdef POLYMORPHIC
337 return vmvnq_x(a, p);
338 #else /* POLYMORPHIC */
339 return vmvnq_x_u16(a, p);
340 #endif /* POLYMORPHIC */
341 }
342
343 // CHECK-LABEL: @test_vmvnq_x_u32(
344 // CHECK-NEXT: entry:
345 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
346 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
347 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.mvn.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> undef)
348 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
349 //
test_vmvnq_x_u32(uint32x4_t a,mve_pred16_t p)350 uint32x4_t test_vmvnq_x_u32(uint32x4_t a, mve_pred16_t p)
351 {
352 #ifdef POLYMORPHIC
353 return vmvnq_x(a, p);
354 #else /* POLYMORPHIC */
355 return vmvnq_x_u32(a, p);
356 #endif /* POLYMORPHIC */
357 }
358
359 // CHECK-LABEL: @test_vnegq_f16(
360 // CHECK-NEXT: entry:
361 // CHECK-NEXT: [[TMP0:%.*]] = fneg <8 x half> [[A:%.*]]
362 // CHECK-NEXT: ret <8 x half> [[TMP0]]
363 //
test_vnegq_f16(float16x8_t a)364 float16x8_t test_vnegq_f16(float16x8_t a)
365 {
366 #ifdef POLYMORPHIC
367 return vnegq(a);
368 #else /* POLYMORPHIC */
369 return vnegq_f16(a);
370 #endif /* POLYMORPHIC */
371 }
372
373 // CHECK-LABEL: @test_vnegq_f32(
374 // CHECK-NEXT: entry:
375 // CHECK-NEXT: [[TMP0:%.*]] = fneg <4 x float> [[A:%.*]]
376 // CHECK-NEXT: ret <4 x float> [[TMP0]]
377 //
test_vnegq_f32(float32x4_t a)378 float32x4_t test_vnegq_f32(float32x4_t a)
379 {
380 #ifdef POLYMORPHIC
381 return vnegq(a);
382 #else /* POLYMORPHIC */
383 return vnegq_f32(a);
384 #endif /* POLYMORPHIC */
385 }
386
387 // CHECK-LABEL: @test_vnegq_s8(
388 // CHECK-NEXT: entry:
389 // CHECK-NEXT: [[TMP0:%.*]] = sub <16 x i8> zeroinitializer, [[A:%.*]]
390 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
391 //
test_vnegq_s8(int8x16_t a)392 int8x16_t test_vnegq_s8(int8x16_t a)
393 {
394 #ifdef POLYMORPHIC
395 return vnegq(a);
396 #else /* POLYMORPHIC */
397 return vnegq_s8(a);
398 #endif /* POLYMORPHIC */
399 }
400
401 // CHECK-LABEL: @test_vnegq_s16(
402 // CHECK-NEXT: entry:
403 // CHECK-NEXT: [[TMP0:%.*]] = sub <8 x i16> zeroinitializer, [[A:%.*]]
404 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
405 //
test_vnegq_s16(int16x8_t a)406 int16x8_t test_vnegq_s16(int16x8_t a)
407 {
408 #ifdef POLYMORPHIC
409 return vnegq(a);
410 #else /* POLYMORPHIC */
411 return vnegq_s16(a);
412 #endif /* POLYMORPHIC */
413 }
414
415 // CHECK-LABEL: @test_vnegq_s32(
416 // CHECK-NEXT: entry:
417 // CHECK-NEXT: [[TMP0:%.*]] = sub <4 x i32> zeroinitializer, [[A:%.*]]
418 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
419 //
test_vnegq_s32(int32x4_t a)420 int32x4_t test_vnegq_s32(int32x4_t a)
421 {
422 #ifdef POLYMORPHIC
423 return vnegq(a);
424 #else /* POLYMORPHIC */
425 return vnegq_s32(a);
426 #endif /* POLYMORPHIC */
427 }
428
429 // CHECK-LABEL: @test_vqabsq_s8(
430 // CHECK-NEXT: entry:
431 // CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <16 x i8> [[A:%.*]], zeroinitializer
432 // CHECK-NEXT: [[TMP1:%.*]] = icmp eq <16 x i8> [[A]], <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>
433 // CHECK-NEXT: [[TMP2:%.*]] = sub <16 x i8> zeroinitializer, [[A]]
434 // CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP1]], <16 x i8> <i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127>, <16 x i8> [[TMP2]]
435 // CHECK-NEXT: [[TMP4:%.*]] = select <16 x i1> [[TMP0]], <16 x i8> [[A]], <16 x i8> [[TMP3]]
436 // CHECK-NEXT: ret <16 x i8> [[TMP4]]
437 //
test_vqabsq_s8(int8x16_t a)438 int8x16_t test_vqabsq_s8(int8x16_t a)
439 {
440 #ifdef POLYMORPHIC
441 return vqabsq(a);
442 #else /* POLYMORPHIC */
443 return vqabsq_s8(a);
444 #endif /* POLYMORPHIC */
445 }
446
447 // CHECK-LABEL: @test_vqabsq_s16(
448 // CHECK-NEXT: entry:
449 // CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <8 x i16> [[A:%.*]], zeroinitializer
450 // CHECK-NEXT: [[TMP1:%.*]] = icmp eq <8 x i16> [[A]], <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
451 // CHECK-NEXT: [[TMP2:%.*]] = sub <8 x i16> zeroinitializer, [[A]]
452 // CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>, <8 x i16> [[TMP2]]
453 // CHECK-NEXT: [[TMP4:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> [[A]], <8 x i16> [[TMP3]]
454 // CHECK-NEXT: ret <8 x i16> [[TMP4]]
455 //
test_vqabsq_s16(int16x8_t a)456 int16x8_t test_vqabsq_s16(int16x8_t a)
457 {
458 #ifdef POLYMORPHIC
459 return vqabsq(a);
460 #else /* POLYMORPHIC */
461 return vqabsq_s16(a);
462 #endif /* POLYMORPHIC */
463 }
464
465 // CHECK-LABEL: @test_vqabsq_s32(
466 // CHECK-NEXT: entry:
467 // CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <4 x i32> [[A:%.*]], zeroinitializer
468 // CHECK-NEXT: [[TMP1:%.*]] = icmp eq <4 x i32> [[A]], <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
469 // CHECK-NEXT: [[TMP2:%.*]] = sub <4 x i32> zeroinitializer, [[A]]
470 // CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>, <4 x i32> [[TMP2]]
471 // CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[A]], <4 x i32> [[TMP3]]
472 // CHECK-NEXT: ret <4 x i32> [[TMP4]]
473 //
test_vqabsq_s32(int32x4_t a)474 int32x4_t test_vqabsq_s32(int32x4_t a)
475 {
476 #ifdef POLYMORPHIC
477 return vqabsq(a);
478 #else /* POLYMORPHIC */
479 return vqabsq_s32(a);
480 #endif /* POLYMORPHIC */
481 }
482
483 // CHECK-LABEL: @test_vqnegq_s8(
484 // CHECK-NEXT: entry:
485 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <16 x i8> [[A:%.*]], <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>
486 // CHECK-NEXT: [[TMP1:%.*]] = sub <16 x i8> zeroinitializer, [[A]]
487 // CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP0]], <16 x i8> <i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127>, <16 x i8> [[TMP1]]
488 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
489 //
test_vqnegq_s8(int8x16_t a)490 int8x16_t test_vqnegq_s8(int8x16_t a)
491 {
492 #ifdef POLYMORPHIC
493 return vqnegq(a);
494 #else /* POLYMORPHIC */
495 return vqnegq_s8(a);
496 #endif /* POLYMORPHIC */
497 }
498
499 // CHECK-LABEL: @test_vqnegq_s16(
500 // CHECK-NEXT: entry:
501 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <8 x i16> [[A:%.*]], <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
502 // CHECK-NEXT: [[TMP1:%.*]] = sub <8 x i16> zeroinitializer, [[A]]
503 // CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>, <8 x i16> [[TMP1]]
504 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
505 //
test_vqnegq_s16(int16x8_t a)506 int16x8_t test_vqnegq_s16(int16x8_t a)
507 {
508 #ifdef POLYMORPHIC
509 return vqnegq(a);
510 #else /* POLYMORPHIC */
511 return vqnegq_s16(a);
512 #endif /* POLYMORPHIC */
513 }
514
515 // CHECK-LABEL: @test_vqnegq_s32(
516 // CHECK-NEXT: entry:
517 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <4 x i32> [[A:%.*]], <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
518 // CHECK-NEXT: [[TMP1:%.*]] = sub <4 x i32> zeroinitializer, [[A]]
519 // CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>, <4 x i32> [[TMP1]]
520 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
521 //
test_vqnegq_s32(int32x4_t a)522 int32x4_t test_vqnegq_s32(int32x4_t a)
523 {
524 #ifdef POLYMORPHIC
525 return vqnegq(a);
526 #else /* POLYMORPHIC */
527 return vqnegq_s32(a);
528 #endif /* POLYMORPHIC */
529 }
530
531 // CHECK-LABEL: @test_vnegq_m_f16(
532 // CHECK-NEXT: entry:
533 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
534 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
535 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.neg.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
536 // CHECK-NEXT: ret <8 x half> [[TMP2]]
537 //
test_vnegq_m_f16(float16x8_t inactive,float16x8_t a,mve_pred16_t p)538 float16x8_t test_vnegq_m_f16(float16x8_t inactive, float16x8_t a, mve_pred16_t p)
539 {
540 #ifdef POLYMORPHIC
541 return vnegq_m(inactive, a, p);
542 #else /* POLYMORPHIC */
543 return vnegq_m_f16(inactive, a, p);
544 #endif /* POLYMORPHIC */
545 }
546
547 // CHECK-LABEL: @test_vnegq_m_f32(
548 // CHECK-NEXT: entry:
549 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
550 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
551 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.neg.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
552 // CHECK-NEXT: ret <4 x float> [[TMP2]]
553 //
test_vnegq_m_f32(float32x4_t inactive,float32x4_t a,mve_pred16_t p)554 float32x4_t test_vnegq_m_f32(float32x4_t inactive, float32x4_t a, mve_pred16_t p)
555 {
556 #ifdef POLYMORPHIC
557 return vnegq_m(inactive, a, p);
558 #else /* POLYMORPHIC */
559 return vnegq_m_f32(inactive, a, p);
560 #endif /* POLYMORPHIC */
561 }
562
563 // CHECK-LABEL: @test_vnegq_m_s8(
564 // CHECK-NEXT: entry:
565 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
566 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
567 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.neg.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
568 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
569 //
test_vnegq_m_s8(int8x16_t inactive,int8x16_t a,mve_pred16_t p)570 int8x16_t test_vnegq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
571 {
572 #ifdef POLYMORPHIC
573 return vnegq_m(inactive, a, p);
574 #else /* POLYMORPHIC */
575 return vnegq_m_s8(inactive, a, p);
576 #endif /* POLYMORPHIC */
577 }
578
579 // CHECK-LABEL: @test_vnegq_m_s16(
580 // CHECK-NEXT: entry:
581 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
582 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
583 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.neg.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
584 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
585 //
test_vnegq_m_s16(int16x8_t inactive,int16x8_t a,mve_pred16_t p)586 int16x8_t test_vnegq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
587 {
588 #ifdef POLYMORPHIC
589 return vnegq_m(inactive, a, p);
590 #else /* POLYMORPHIC */
591 return vnegq_m_s16(inactive, a, p);
592 #endif /* POLYMORPHIC */
593 }
594
595 // CHECK-LABEL: @test_vnegq_m_s32(
596 // CHECK-NEXT: entry:
597 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
598 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
599 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.neg.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
600 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
601 //
test_vnegq_m_s32(int32x4_t inactive,int32x4_t a,mve_pred16_t p)602 int32x4_t test_vnegq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
603 {
604 #ifdef POLYMORPHIC
605 return vnegq_m(inactive, a, p);
606 #else /* POLYMORPHIC */
607 return vnegq_m_s32(inactive, a, p);
608 #endif /* POLYMORPHIC */
609 }
610
611 // CHECK-LABEL: @test_vnegq_x_f16(
612 // CHECK-NEXT: entry:
613 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
614 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
615 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.neg.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> undef)
616 // CHECK-NEXT: ret <8 x half> [[TMP2]]
617 //
test_vnegq_x_f16(float16x8_t a,mve_pred16_t p)618 float16x8_t test_vnegq_x_f16(float16x8_t a, mve_pred16_t p)
619 {
620 #ifdef POLYMORPHIC
621 return vnegq_x(a, p);
622 #else /* POLYMORPHIC */
623 return vnegq_x_f16(a, p);
624 #endif /* POLYMORPHIC */
625 }
626
627 // CHECK-LABEL: @test_vnegq_x_f32(
628 // CHECK-NEXT: entry:
629 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
630 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
631 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.neg.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> undef)
632 // CHECK-NEXT: ret <4 x float> [[TMP2]]
633 //
test_vnegq_x_f32(float32x4_t a,mve_pred16_t p)634 float32x4_t test_vnegq_x_f32(float32x4_t a, mve_pred16_t p)
635 {
636 #ifdef POLYMORPHIC
637 return vnegq_x(a, p);
638 #else /* POLYMORPHIC */
639 return vnegq_x_f32(a, p);
640 #endif /* POLYMORPHIC */
641 }
642
643 // CHECK-LABEL: @test_vnegq_x_s8(
644 // CHECK-NEXT: entry:
645 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
646 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
647 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.neg.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> undef)
648 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
649 //
test_vnegq_x_s8(int8x16_t a,mve_pred16_t p)650 int8x16_t test_vnegq_x_s8(int8x16_t a, mve_pred16_t p)
651 {
652 #ifdef POLYMORPHIC
653 return vnegq_x(a, p);
654 #else /* POLYMORPHIC */
655 return vnegq_x_s8(a, p);
656 #endif /* POLYMORPHIC */
657 }
658
659 // CHECK-LABEL: @test_vnegq_x_s16(
660 // CHECK-NEXT: entry:
661 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
662 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
663 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.neg.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> undef)
664 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
665 //
test_vnegq_x_s16(int16x8_t a,mve_pred16_t p)666 int16x8_t test_vnegq_x_s16(int16x8_t a, mve_pred16_t p)
667 {
668 #ifdef POLYMORPHIC
669 return vnegq_x(a, p);
670 #else /* POLYMORPHIC */
671 return vnegq_x_s16(a, p);
672 #endif /* POLYMORPHIC */
673 }
674
675 // CHECK-LABEL: @test_vnegq_x_s32(
676 // CHECK-NEXT: entry:
677 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
678 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
679 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.neg.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> undef)
680 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
681 //
test_vnegq_x_s32(int32x4_t a,mve_pred16_t p)682 int32x4_t test_vnegq_x_s32(int32x4_t a, mve_pred16_t p)
683 {
684 #ifdef POLYMORPHIC
685 return vnegq_x(a, p);
686 #else /* POLYMORPHIC */
687 return vnegq_x_s32(a, p);
688 #endif /* POLYMORPHIC */
689 }
690
691 // CHECK-LABEL: @test_vabsq_m_f16(
692 // CHECK-NEXT: entry:
693 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
694 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
695 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.abs.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
696 // CHECK-NEXT: ret <8 x half> [[TMP2]]
697 //
test_vabsq_m_f16(float16x8_t inactive,float16x8_t a,mve_pred16_t p)698 float16x8_t test_vabsq_m_f16(float16x8_t inactive, float16x8_t a, mve_pred16_t p)
699 {
700 #ifdef POLYMORPHIC
701 return vabsq_m(inactive, a, p);
702 #else /* POLYMORPHIC */
703 return vabsq_m_f16(inactive, a, p);
704 #endif /* POLYMORPHIC */
705 }
706
707 // CHECK-LABEL: @test_vabsq_m_f32(
708 // CHECK-NEXT: entry:
709 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
710 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
711 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.abs.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
712 // CHECK-NEXT: ret <4 x float> [[TMP2]]
713 //
test_vabsq_m_f32(float32x4_t inactive,float32x4_t a,mve_pred16_t p)714 float32x4_t test_vabsq_m_f32(float32x4_t inactive, float32x4_t a, mve_pred16_t p)
715 {
716 #ifdef POLYMORPHIC
717 return vabsq_m(inactive, a, p);
718 #else /* POLYMORPHIC */
719 return vabsq_m_f32(inactive, a, p);
720 #endif /* POLYMORPHIC */
721 }
722
723 // CHECK-LABEL: @test_vabsq_m_s8(
724 // CHECK-NEXT: entry:
725 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
726 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
727 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.abs.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
728 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
729 //
test_vabsq_m_s8(int8x16_t inactive,int8x16_t a,mve_pred16_t p)730 int8x16_t test_vabsq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
731 {
732 #ifdef POLYMORPHIC
733 return vabsq_m(inactive, a, p);
734 #else /* POLYMORPHIC */
735 return vabsq_m_s8(inactive, a, p);
736 #endif /* POLYMORPHIC */
737 }
738
739 // CHECK-LABEL: @test_vabsq_m_s16(
740 // CHECK-NEXT: entry:
741 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
742 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
743 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.abs.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
744 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
745 //
test_vabsq_m_s16(int16x8_t inactive,int16x8_t a,mve_pred16_t p)746 int16x8_t test_vabsq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
747 {
748 #ifdef POLYMORPHIC
749 return vabsq_m(inactive, a, p);
750 #else /* POLYMORPHIC */
751 return vabsq_m_s16(inactive, a, p);
752 #endif /* POLYMORPHIC */
753 }
754
755 // CHECK-LABEL: @test_vabsq_m_s32(
756 // CHECK-NEXT: entry:
757 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
758 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
759 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.abs.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
760 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
761 //
test_vabsq_m_s32(int32x4_t inactive,int32x4_t a,mve_pred16_t p)762 int32x4_t test_vabsq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
763 {
764 #ifdef POLYMORPHIC
765 return vabsq_m(inactive, a, p);
766 #else /* POLYMORPHIC */
767 return vabsq_m_s32(inactive, a, p);
768 #endif /* POLYMORPHIC */
769 }
770
// CHECK-LABEL: @test_vabsq_x_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.abs.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> undef)
// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
// "Don't care" predicated vabs on 8 x half: unlike the _m forms there is no
// 'inactive' operand, so the intrinsic's merge value is undef (checked above).
float16x8_t test_vabsq_x_f16(float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_x(a, p);
#else /* POLYMORPHIC */
    return vabsq_x_f16(a, p);
#endif /* POLYMORPHIC */
}
786
// CHECK-LABEL: @test_vabsq_x_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.abs.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> undef)
// CHECK-NEXT: ret <4 x float> [[TMP2]]
//
// "Don't care" predicated vabs on 4 x float: no 'inactive' operand, so the
// intrinsic's merge value is undef (checked above).
float32x4_t test_vabsq_x_f32(float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_x(a, p);
#else /* POLYMORPHIC */
    return vabsq_x_f32(a, p);
#endif /* POLYMORPHIC */
}
802
// CHECK-LABEL: @test_vabsq_x_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.abs.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
// "Don't care" predicated vabs on 16 x i8: no 'inactive' operand, so the
// intrinsic's merge value is undef (checked above).
int8x16_t test_vabsq_x_s8(int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_x(a, p);
#else /* POLYMORPHIC */
    return vabsq_x_s8(a, p);
#endif /* POLYMORPHIC */
}
818
// CHECK-LABEL: @test_vabsq_x_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.abs.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
// "Don't care" predicated vabs on 8 x i16: no 'inactive' operand, so the
// intrinsic's merge value is undef (checked above).
int16x8_t test_vabsq_x_s16(int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_x(a, p);
#else /* POLYMORPHIC */
    return vabsq_x_s16(a, p);
#endif /* POLYMORPHIC */
}
834
// CHECK-LABEL: @test_vabsq_x_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.abs.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
// "Don't care" predicated vabs on 4 x i32: no 'inactive' operand, so the
// intrinsic's merge value is undef (checked above).
int32x4_t test_vabsq_x_s32(int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_x(a, p);
#else /* POLYMORPHIC */
    return vabsq_x_s32(a, p);
#endif /* POLYMORPHIC */
}
850
// CHECK-LABEL: @test_vqnegq_m_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.qneg.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
// Merging predicated saturating negate on 16 x i8: lowered to the
// llvm.arm.mve.qneg.predicated intrinsic with 'inactive' as the merge operand.
int8x16_t test_vqnegq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vqnegq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqnegq_m_s8(inactive, a, p);
#endif /* POLYMORPHIC */
}
866
// CHECK-LABEL: @test_vqnegq_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.qneg.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
// Merging predicated saturating negate on 8 x i16: lowered to the
// llvm.arm.mve.qneg.predicated intrinsic with 'inactive' as the merge operand.
int16x8_t test_vqnegq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vqnegq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqnegq_m_s16(inactive, a, p);
#endif /* POLYMORPHIC */
}
882
// CHECK-LABEL: @test_vqnegq_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.qneg.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
// Merging predicated saturating negate on 4 x i32: lowered to the
// llvm.arm.mve.qneg.predicated intrinsic with 'inactive' as the merge operand.
int32x4_t test_vqnegq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vqnegq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqnegq_m_s32(inactive, a, p);
#endif /* POLYMORPHIC */
}
898
// CHECK-LABEL: @test_vqabsq_m_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.qabs.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
// Merging predicated saturating abs on 16 x i8: lowered to the
// llvm.arm.mve.qabs.predicated intrinsic with 'inactive' as the merge operand.
int8x16_t test_vqabsq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vqabsq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqabsq_m_s8(inactive, a, p);
#endif /* POLYMORPHIC */
}
914
// CHECK-LABEL: @test_vqabsq_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.qabs.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
// Merging predicated saturating abs on 8 x i16: lowered to the
// llvm.arm.mve.qabs.predicated intrinsic with 'inactive' as the merge operand.
int16x8_t test_vqabsq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vqabsq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqabsq_m_s16(inactive, a, p);
#endif /* POLYMORPHIC */
}
930
// CHECK-LABEL: @test_vqabsq_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.qabs.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
// Merging predicated saturating abs on 4 x i32: lowered to the
// llvm.arm.mve.qabs.predicated intrinsic with 'inactive' as the merge operand.
int32x4_t test_vqabsq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vqabsq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqabsq_m_s32(inactive, a, p);
#endif /* POLYMORPHIC */
}
946
947