; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s

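; Lowering of the llvm.aarch64.neon.sminv intrinsics (signed minimum across
; vector lanes): sminv plus an smov/fmov lane read for the supported vector
; types, and a pairwise sminp for the 2s case, which sminv does not support.
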
define signext i8 @test_vminv_s8(<8 x i8> %a1) {
; CHECK-LABEL: test_vminv_s8
; CHECK: sminv.8b b[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
  %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a1)
  %0 = trunc i32 %vminv.i to i8
  ret i8 %0
}

define signext i16 @test_vminv_s16(<4 x i16> %a1) {
; CHECK-LABEL: test_vminv_s16
; CHECK: sminv.4h h[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: smov.h w0, v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
  %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a1)
  %0 = trunc i32 %vminv.i to i16
  ret i16 %0
}

define i32 @test_vminv_s32(<2 x i32> %a1) {
; CHECK-LABEL: test_vminv_s32
; sminv has no 2s form in the ISA, so this reduction is lowered to a pairwise
; sminp instead: sminp.2s v0, v0, v0 leaves min(%a1[0], %a1[1]) in lane 0,
; which is then read out with fmov.
; CHECK: sminp.2s v[[REGNUM:[0-9]+]], v0, v0
; CHECK-NEXT: fmov w0, s[[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32> %a1)
  ret i32 %vminv.i
}

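; 128-bit (Q register) variants of the same reductions.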
define signext i8 @test_vminvq_s8(<16 x i8> %a1) {
; CHECK-LABEL: test_vminvq_s8
; CHECK: sminv.16b b[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: smov.b w0, v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
  %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a1)
  %0 = trunc i32 %vminv.i to i8
  ret i8 %0
}

define signext i16 @test_vminvq_s16(<8 x i16> %a1) {
; CHECK-LABEL: test_vminvq_s16
; CHECK: sminv.8h h[[REGNUM:[0-9]+]], v0
; CHECK-NEXT: smov.h w0, v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
  %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a1)
  %0 = trunc i32 %vminv.i to i16
  ret i16 %0
}

define i32 @test_vminvq_s32(<4 x i32> %a1) {
; CHECK-LABEL: test_vminvq_s32
; CHECK: sminv.4s [[REGNUM:s[0-9]+]], v0
; CHECK-NEXT: fmov w0, [[REGNUM]]
; CHECK-NEXT: ret
entry:
  %vminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a1)
  ret i32 %vminv.i
}

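; Declarations for the sminv intrinsics exercised above. The result is always
; returned as an i32; narrow element types are truncated by the caller.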
declare i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8>)
declare i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32>)
declare i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8>)