// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s

#include <arm_mve.h>
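
// Each test below is compiled twice: once calling the explicitly typed
// intrinsic (e.g. vcmulq_f16) and once, with -DPOLYMORPHIC defined, calling
// the polymorphic overload (e.g. vcmulq). Both forms are expected to lower to
// the same @llvm.arm.mve.vcmulq* intrinsic, whose leading i32 argument
// encodes the rotation: 0, 90, 180 and 270 degrees map to 0, 1, 2 and 3.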

// CHECK-LABEL: @test_vcmulq_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 0, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP0]]
//
float16x8_t test_vcmulq_f16(float16x8_t a, float16x8_t b)
{
#ifdef POLYMORPHIC
    return vcmulq(a, b);
#else
    return vcmulq_f16(a, b);
#endif
}

// CHECK-LABEL: @test_vcmulq_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 0, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP0]]
//
float32x4_t test_vcmulq_f32(float32x4_t a, float32x4_t b)
{
#ifdef POLYMORPHIC
    return vcmulq(a, b);
#else
    return vcmulq_f32(a, b);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot90_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 1, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP0]]
//
float16x8_t test_vcmulq_rot90_f16(float16x8_t a, float16x8_t b)
{
#ifdef POLYMORPHIC
    return vcmulq_rot90(a, b);
#else
    return vcmulq_rot90_f16(a, b);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot90_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 1, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP0]]
//
float32x4_t test_vcmulq_rot90_f32(float32x4_t a, float32x4_t b)
{
#ifdef POLYMORPHIC
    return vcmulq_rot90(a, b);
#else
    return vcmulq_rot90_f32(a, b);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot180_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 2, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP0]]
//
float16x8_t test_vcmulq_rot180_f16(float16x8_t a, float16x8_t b)
{
#ifdef POLYMORPHIC
    return vcmulq_rot180(a, b);
#else
    return vcmulq_rot180_f16(a, b);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot180_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 2, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP0]]
//
float32x4_t test_vcmulq_rot180_f32(float32x4_t a, float32x4_t b)
{
#ifdef POLYMORPHIC
    return vcmulq_rot180(a, b);
#else
    return vcmulq_rot180_f32(a, b);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot270_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 3, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP0]]
//
float16x8_t test_vcmulq_rot270_f16(float16x8_t a, float16x8_t b)
{
#ifdef POLYMORPHIC
    return vcmulq_rot270(a, b);
#else
    return vcmulq_rot270_f16(a, b);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot270_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 3, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP0]]
//
float32x4_t test_vcmulq_rot270_f32(float32x4_t a, float32x4_t b)
{
#ifdef POLYMORPHIC
    return vcmulq_rot270(a, b);
#else
    return vcmulq_rot270_f32(a, b);
#endif
}
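
// Predicated (_m) forms: inactive lanes of the result take their values from
// the explicit 'inactive' argument, which is forwarded to the
// @llvm.arm.mve.vcmulq.predicated intrinsic.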

// CHECK-LABEL: @test_vcmulq_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 0, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vcmulq_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_m(inactive, a, b, p);
#else
    return vcmulq_m_f16(inactive, a, b, p);
#endif
}

// CHECK-LABEL: @test_vcmulq_m_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 0, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vcmulq_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_m(inactive, a, b, p);
#else
    return vcmulq_m_f32(inactive, a, b, p);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot90_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 1, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vcmulq_rot90_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_rot90_m(inactive, a, b, p);
#else
    return vcmulq_rot90_m_f16(inactive, a, b, p);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot90_m_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 1, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vcmulq_rot90_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_rot90_m(inactive, a, b, p);
#else
    return vcmulq_rot90_m_f32(inactive, a, b, p);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot180_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 2, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vcmulq_rot180_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_rot180_m(inactive, a, b, p);
#else
    return vcmulq_rot180_m_f16(inactive, a, b, p);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot180_m_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 2, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vcmulq_rot180_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_rot180_m(inactive, a, b, p);
#else
    return vcmulq_rot180_m_f32(inactive, a, b, p);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot270_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 3, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vcmulq_rot270_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_rot270_m(inactive, a, b, p);
#else
    return vcmulq_rot270_m_f16(inactive, a, b, p);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot270_m_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 3, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vcmulq_rot270_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_rot270_m(inactive, a, b, p);
#else
    return vcmulq_rot270_m_f32(inactive, a, b, p);
#endif
}
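
// Don't-care (_x) forms: there is no 'inactive' argument, so the predicated
// intrinsic is called with an undef inactive operand and the false-predicated
// lanes of the result are left unspecified.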

// CHECK-LABEL: @test_vcmulq_x_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 0, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vcmulq_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_x(a, b, p);
#else
    return vcmulq_x_f16(a, b, p);
#endif
}

// CHECK-LABEL: @test_vcmulq_x_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 0, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vcmulq_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_x(a, b, p);
#else
    return vcmulq_x_f32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot90_x_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 1, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vcmulq_rot90_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_rot90_x(a, b, p);
#else
    return vcmulq_rot90_x_f16(a, b, p);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot90_x_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 1, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vcmulq_rot90_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_rot90_x(a, b, p);
#else
    return vcmulq_rot90_x_f32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot180_x_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 2, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vcmulq_rot180_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_rot180_x(a, b, p);
#else
    return vcmulq_rot180_x_f16(a, b, p);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot180_x_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 2, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vcmulq_rot180_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_rot180_x(a, b, p);
#else
    return vcmulq_rot180_x_f32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot270_x_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 3, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vcmulq_rot270_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_rot270_x(a, b, p);
#else
    return vcmulq_rot270_x_f16(a, b, p);
#endif
}

// CHECK-LABEL: @test_vcmulq_rot270_x_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 3, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vcmulq_rot270_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vcmulq_rot270_x(a, b, p);
#else
    return vcmulq_rot270_x_f32(a, b, p);
#endif
}