1; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
2; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t
3
4; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
5; WARN-NOT: warning
6
7;
8; CNOT
9;
10
define <vscale x 16 x i8> @cnot_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cnot_i8:
; CHECK: cnot z0.b, p0/m, z1.b
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.cnot.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %res
}
20
define <vscale x 8 x i16> @cnot_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cnot_i16:
; CHECK: cnot z0.h, p0/m, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.cnot.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %res
}
30
define <vscale x 4 x i32> @cnot_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cnot_i32:
; CHECK: cnot z0.s, p0/m, z1.s
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.cnot.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %res
}
40
define <vscale x 2 x i64> @cnot_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cnot_i64:
; CHECK: cnot z0.d, p0/m, z1.d
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.cnot.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %res
}
50
51;
52; NOT
53;
54
define <vscale x 16 x i8> @not_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b) {
; CHECK-LABEL: not_i8:
; CHECK: not z0.b, p0/m, z1.b
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.not.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %res
}
64
define <vscale x 8 x i16> @not_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
; CHECK-LABEL: not_i16:
; CHECK: not z0.h, p0/m, z1.h
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.not.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %res
}
74
define <vscale x 4 x i32> @not_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: not_i32:
; CHECK: not z0.s, p0/m, z1.s
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.not.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %res
}
84
define <vscale x 2 x i64> @not_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: not_i64:
; CHECK: not z0.d, p0/m, z1.d
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.not.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %res
}
94
; Declarations for the SVE intrinsics exercised above. All take three operands
; (vector, governing predicate, source vector); the generated code uses the
; merging (/m) predicated form, as checked by the CHECK lines above.
declare <vscale x 16 x i8> @llvm.aarch64.sve.cnot.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.cnot.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.cnot.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.cnot.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.not.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.not.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.not.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.not.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
104