1 // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
2 // RUN:  -disable-O0-optnone -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
3 
4 // Test new aarch64 intrinsics and types
5 
6 #include <arm_neon.h>
7 
8 // CHECK-LABEL: define i16 @test_vaddlv_s8(<8 x i8> %a) #0 {
9 // CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8> %a) #3
10 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16
11 // CHECK:   ret i16 [[TMP0]]
int16_t test_vaddlv_s8(int8x8_t a) {
  return vaddlv_s8(a);
}
15 
16 // CHECK-LABEL: define i32 @test_vaddlv_s16(<4 x i16> %a) #0 {
17 // CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16> %a) #3
18 // CHECK:   ret i32 [[VADDLV_I]]
int32_t test_vaddlv_s16(int16x4_t a) {
  return vaddlv_s16(a);
}
22 
23 // CHECK-LABEL: define i16 @test_vaddlv_u8(<8 x i8> %a) #0 {
24 // CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a) #3
25 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16
26 // CHECK:   ret i16 [[TMP0]]
uint16_t test_vaddlv_u8(uint8x8_t a) {
  return vaddlv_u8(a);
}
30 
31 // CHECK-LABEL: define i32 @test_vaddlv_u16(<4 x i16> %a) #0 {
32 // CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> %a) #3
33 // CHECK:   ret i32 [[VADDLV_I]]
uint32_t test_vaddlv_u16(uint16x4_t a) {
  return vaddlv_u16(a);
}
37 
38 // CHECK-LABEL: define i16 @test_vaddlvq_s8(<16 x i8> %a) #1 {
39 // CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8> %a) #3
40 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16
41 // CHECK:   ret i16 [[TMP0]]
int16_t test_vaddlvq_s8(int8x16_t a) {
  return vaddlvq_s8(a);
}
45 
46 // CHECK-LABEL: define i32 @test_vaddlvq_s16(<8 x i16> %a) #1 {
47 // CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> %a) #3
48 // CHECK:   ret i32 [[VADDLV_I]]
int32_t test_vaddlvq_s16(int16x8_t a) {
  return vaddlvq_s16(a);
}
52 
53 // CHECK-LABEL: define i64 @test_vaddlvq_s32(<4 x i32> %a) #1 {
54 // CHECK:   [[VADDLVQ_S32_I:%.*]] = call i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32> %a) #3
55 // CHECK:   ret i64 [[VADDLVQ_S32_I]]
int64_t test_vaddlvq_s32(int32x4_t a) {
  return vaddlvq_s32(a);
}
59 
60 // CHECK-LABEL: define i16 @test_vaddlvq_u8(<16 x i8> %a) #1 {
61 // CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a) #3
62 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16
63 // CHECK:   ret i16 [[TMP0]]
uint16_t test_vaddlvq_u8(uint8x16_t a) {
  return vaddlvq_u8(a);
}
67 
68 // CHECK-LABEL: define i32 @test_vaddlvq_u16(<8 x i16> %a) #1 {
69 // CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> %a) #3
70 // CHECK:   ret i32 [[VADDLV_I]]
uint32_t test_vaddlvq_u16(uint16x8_t a) {
  return vaddlvq_u16(a);
}
74 
75 // CHECK-LABEL: define i64 @test_vaddlvq_u32(<4 x i32> %a) #1 {
76 // CHECK:   [[VADDLVQ_U32_I:%.*]] = call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> %a) #3
77 // CHECK:   ret i64 [[VADDLVQ_U32_I]]
uint64_t test_vaddlvq_u32(uint32x4_t a) {
  return vaddlvq_u32(a);
}
81 
82 // CHECK-LABEL: define i8 @test_vmaxv_s8(<8 x i8> %a) #0 {
83 // CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a) #3
84 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
85 // CHECK:   ret i8 [[TMP0]]
int8_t test_vmaxv_s8(int8x8_t a) {
  return vmaxv_s8(a);
}
89 
90 // CHECK-LABEL: define i16 @test_vmaxv_s16(<4 x i16> %a) #0 {
91 // CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a) #3
92 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16
93 // CHECK:   ret i16 [[TMP2]]
int16_t test_vmaxv_s16(int16x4_t a) {
  return vmaxv_s16(a);
}
97 
98 // CHECK-LABEL: define i8 @test_vmaxv_u8(<8 x i8> %a) #0 {
99 // CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %a) #3
100 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
101 // CHECK:   ret i8 [[TMP0]]
uint8_t test_vmaxv_u8(uint8x8_t a) {
  return vmaxv_u8(a);
}
105 
106 // CHECK-LABEL: define i16 @test_vmaxv_u16(<4 x i16> %a) #0 {
107 // CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a) #3
108 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16
109 // CHECK:   ret i16 [[TMP2]]
uint16_t test_vmaxv_u16(uint16x4_t a) {
  return vmaxv_u16(a);
}
113 
114 // CHECK-LABEL: define i8 @test_vmaxvq_s8(<16 x i8> %a) #1 {
115 // CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a) #3
116 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
117 // CHECK:   ret i8 [[TMP0]]
int8_t test_vmaxvq_s8(int8x16_t a) {
  return vmaxvq_s8(a);
}
121 
122 // CHECK-LABEL: define i16 @test_vmaxvq_s16(<8 x i16> %a) #1 {
123 // CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a) #3
124 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16
125 // CHECK:   ret i16 [[TMP2]]
int16_t test_vmaxvq_s16(int16x8_t a) {
  return vmaxvq_s16(a);
}
129 
130 // CHECK-LABEL: define i32 @test_vmaxvq_s32(<4 x i32> %a) #1 {
131 // CHECK:   [[VMAXVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> %a) #3
132 // CHECK:   ret i32 [[VMAXVQ_S32_I]]
int32_t test_vmaxvq_s32(int32x4_t a) {
  return vmaxvq_s32(a);
}
136 
137 // CHECK-LABEL: define i8 @test_vmaxvq_u8(<16 x i8> %a) #1 {
138 // CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %a) #3
139 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
140 // CHECK:   ret i8 [[TMP0]]
uint8_t test_vmaxvq_u8(uint8x16_t a) {
  return vmaxvq_u8(a);
}
144 
145 // CHECK-LABEL: define i16 @test_vmaxvq_u16(<8 x i16> %a) #1 {
146 // CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a) #3
147 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16
148 // CHECK:   ret i16 [[TMP2]]
uint16_t test_vmaxvq_u16(uint16x8_t a) {
  return vmaxvq_u16(a);
}
152 
153 // CHECK-LABEL: define i32 @test_vmaxvq_u32(<4 x i32> %a) #1 {
154 // CHECK:   [[VMAXVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> %a) #3
155 // CHECK:   ret i32 [[VMAXVQ_U32_I]]
uint32_t test_vmaxvq_u32(uint32x4_t a) {
  return vmaxvq_u32(a);
}
159 
160 // CHECK-LABEL: define i8 @test_vminv_s8(<8 x i8> %a) #0 {
161 // CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a) #3
162 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
163 // CHECK:   ret i8 [[TMP0]]
int8_t test_vminv_s8(int8x8_t a) {
  return vminv_s8(a);
}
167 
168 // CHECK-LABEL: define i16 @test_vminv_s16(<4 x i16> %a) #0 {
169 // CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a) #3
170 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16
171 // CHECK:   ret i16 [[TMP2]]
int16_t test_vminv_s16(int16x4_t a) {
  return vminv_s16(a);
}
175 
176 // CHECK-LABEL: define i8 @test_vminv_u8(<8 x i8> %a) #0 {
177 // CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %a) #3
178 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
179 // CHECK:   ret i8 [[TMP0]]
uint8_t test_vminv_u8(uint8x8_t a) {
  return vminv_u8(a);
}
183 
184 // CHECK-LABEL: define i16 @test_vminv_u16(<4 x i16> %a) #0 {
185 // CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %a) #3
186 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16
187 // CHECK:   ret i16 [[TMP2]]
uint16_t test_vminv_u16(uint16x4_t a) {
  return vminv_u16(a);
}
191 
192 // CHECK-LABEL: define i8 @test_vminvq_s8(<16 x i8> %a) #1 {
193 // CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a) #3
194 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
195 // CHECK:   ret i8 [[TMP0]]
int8_t test_vminvq_s8(int8x16_t a) {
  return vminvq_s8(a);
}
199 
200 // CHECK-LABEL: define i16 @test_vminvq_s16(<8 x i16> %a) #1 {
201 // CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a) #3
202 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16
203 // CHECK:   ret i16 [[TMP2]]
int16_t test_vminvq_s16(int16x8_t a) {
  return vminvq_s16(a);
}
207 
208 // CHECK-LABEL: define i32 @test_vminvq_s32(<4 x i32> %a) #1 {
209 // CHECK:   [[VMINVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a) #3
210 // CHECK:   ret i32 [[VMINVQ_S32_I]]
int32_t test_vminvq_s32(int32x4_t a) {
  return vminvq_s32(a);
}
214 
215 // CHECK-LABEL: define i8 @test_vminvq_u8(<16 x i8> %a) #1 {
216 // CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %a) #3
217 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
218 // CHECK:   ret i8 [[TMP0]]
uint8_t test_vminvq_u8(uint8x16_t a) {
  return vminvq_u8(a);
}
222 
223 // CHECK-LABEL: define i16 @test_vminvq_u16(<8 x i16> %a) #1 {
224 // CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> %a) #3
225 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16
226 // CHECK:   ret i16 [[TMP2]]
uint16_t test_vminvq_u16(uint16x8_t a) {
  return vminvq_u16(a);
}
230 
231 // CHECK-LABEL: define i32 @test_vminvq_u32(<4 x i32> %a) #1 {
232 // CHECK:   [[VMINVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> %a) #3
233 // CHECK:   ret i32 [[VMINVQ_U32_I]]
uint32_t test_vminvq_u32(uint32x4_t a) {
  return vminvq_u32(a);
}
237 
238 // CHECK-LABEL: define i8 @test_vaddv_s8(<8 x i8> %a) #0 {
239 // CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a) #3
240 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
241 // CHECK:   ret i8 [[TMP0]]
int8_t test_vaddv_s8(int8x8_t a) {
  return vaddv_s8(a);
}
245 
246 // CHECK-LABEL: define i16 @test_vaddv_s16(<4 x i16> %a) #0 {
247 // CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a) #3
248 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16
249 // CHECK:   ret i16 [[TMP2]]
int16_t test_vaddv_s16(int16x4_t a) {
  return vaddv_s16(a);
}
253 
254 // CHECK-LABEL: define i8 @test_vaddv_u8(<8 x i8> %a) #0 {
255 // CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %a) #3
256 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
257 // CHECK:   ret i8 [[TMP0]]
uint8_t test_vaddv_u8(uint8x8_t a) {
  return vaddv_u8(a);
}
261 
262 // CHECK-LABEL: define i16 @test_vaddv_u16(<4 x i16> %a) #0 {
263 // CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> %a) #3
264 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16
265 // CHECK:   ret i16 [[TMP2]]
uint16_t test_vaddv_u16(uint16x4_t a) {
  return vaddv_u16(a);
}
269 
270 // CHECK-LABEL: define i8 @test_vaddvq_s8(<16 x i8> %a) #1 {
271 // CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a) #3
272 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
273 // CHECK:   ret i8 [[TMP0]]
int8_t test_vaddvq_s8(int8x16_t a) {
  return vaddvq_s8(a);
}
277 
278 // CHECK-LABEL: define i16 @test_vaddvq_s16(<8 x i16> %a) #1 {
279 // CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a) #3
280 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16
281 // CHECK:   ret i16 [[TMP2]]
int16_t test_vaddvq_s16(int16x8_t a) {
  return vaddvq_s16(a);
}
285 
286 // CHECK-LABEL: define i32 @test_vaddvq_s32(<4 x i32> %a) #1 {
287 // CHECK:   [[VADDVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a) #3
288 // CHECK:   ret i32 [[VADDVQ_S32_I]]
int32_t test_vaddvq_s32(int32x4_t a) {
  return vaddvq_s32(a);
}
292 
293 // CHECK-LABEL: define i8 @test_vaddvq_u8(<16 x i8> %a) #1 {
294 // CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8> %a) #3
295 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
296 // CHECK:   ret i8 [[TMP0]]
uint8_t test_vaddvq_u8(uint8x16_t a) {
  return vaddvq_u8(a);
}
300 
301 // CHECK-LABEL: define i16 @test_vaddvq_u16(<8 x i16> %a) #1 {
302 // CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> %a) #3
303 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16
304 // CHECK:   ret i16 [[TMP2]]
uint16_t test_vaddvq_u16(uint16x8_t a) {
  return vaddvq_u16(a);
}
308 
309 // CHECK-LABEL: define i32 @test_vaddvq_u32(<4 x i32> %a) #1 {
310 // CHECK:   [[VADDVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> %a) #3
311 // CHECK:   ret i32 [[VADDVQ_U32_I]]
uint32_t test_vaddvq_u32(uint32x4_t a) {
  return vaddvq_u32(a);
}
315 
316 // CHECK-LABEL: define float @test_vmaxvq_f32(<4 x float> %a) #1 {
317 // CHECK:   [[VMAXVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> %a) #3
318 // CHECK:   ret float [[VMAXVQ_F32_I]]
float32_t test_vmaxvq_f32(float32x4_t a) {
  return vmaxvq_f32(a);
}
322 
323 // CHECK-LABEL: define float @test_vminvq_f32(<4 x float> %a) #1 {
324 // CHECK:   [[VMINVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %a) #3
325 // CHECK:   ret float [[VMINVQ_F32_I]]
float32_t test_vminvq_f32(float32x4_t a) {
  return vminvq_f32(a);
}
329 
330 // CHECK-LABEL: define float @test_vmaxnmvq_f32(<4 x float> %a) #1 {
331 // CHECK:   [[VMAXNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> %a) #3
332 // CHECK:   ret float [[VMAXNMVQ_F32_I]]
float32_t test_vmaxnmvq_f32(float32x4_t a) {
  return vmaxnmvq_f32(a);
}
336 
337 // CHECK-LABEL: define float @test_vminnmvq_f32(<4 x float> %a) #1 {
338 // CHECK:   [[VMINNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float> %a) #3
339 // CHECK:   ret float [[VMINNMVQ_F32_I]]
float32_t test_vminnmvq_f32(float32x4_t a) {
  return vminnmvq_f32(a);
}
343 
344 // CHECK: attributes #0 ={{.*}}"min-legal-vector-width"="64"
345 // CHECK: attributes #1 ={{.*}}"min-legal-vector-width"="128"
346