// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -o - %s >/dev/null
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
15
svint8_t test_sveor_s8_z(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // Zeroing predication: inactive lanes of op1 are cleared via sel before the EOR.
  // CHECK-LABEL: test_sveor_s8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s8,_z,)(pg, op1, op2);
}
24
svint16_t test_sveor_s16_z(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // Zeroing predication; pg is narrowed from svbool_t to the 16-bit element predicate.
  // CHECK-LABEL: test_sveor_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s16,_z,)(pg, op1, op2);
}
34
svint32_t test_sveor_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // Zeroing predication; pg is narrowed from svbool_t to the 32-bit element predicate.
  // CHECK-LABEL: test_sveor_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s32,_z,)(pg, op1, op2);
}
44
svint64_t test_sveor_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // Zeroing predication; pg is narrowed from svbool_t to the 64-bit element predicate.
  // CHECK-LABEL: test_sveor_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s64,_z,)(pg, op1, op2);
}
54
svuint8_t test_sveor_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // Zeroing predication: inactive lanes of op1 are cleared via sel before the EOR.
  // CHECK-LABEL: test_sveor_u8_z
  // CHECK: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u8,_z,)(pg, op1, op2);
}
63
svuint16_t test_sveor_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // Zeroing predication; pg is narrowed from svbool_t to the 16-bit element predicate.
  // CHECK-LABEL: test_sveor_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u16,_z,)(pg, op1, op2);
}
73
svuint32_t test_sveor_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // Zeroing predication; pg is narrowed from svbool_t to the 32-bit element predicate.
  // CHECK-LABEL: test_sveor_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u32,_z,)(pg, op1, op2);
}
83
svuint64_t test_sveor_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // Zeroing predication; pg is narrowed from svbool_t to the 64-bit element predicate.
  // CHECK-LABEL: test_sveor_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u64,_z,)(pg, op1, op2);
}
93
svint8_t test_sveor_s8_m(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // Merging predication: the predicated eor intrinsic is applied to op1/op2 directly.
  // CHECK-LABEL: test_sveor_s8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s8,_m,)(pg, op1, op2);
}
101
svint16_t test_sveor_s16_m(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // Merging predication; pg is narrowed from svbool_t to the 16-bit element predicate.
  // CHECK-LABEL: test_sveor_s16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s16,_m,)(pg, op1, op2);
}
110
svint32_t test_sveor_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // Merging predication; pg is narrowed from svbool_t to the 32-bit element predicate.
  // CHECK-LABEL: test_sveor_s32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s32,_m,)(pg, op1, op2);
}
119
svint64_t test_sveor_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // Merging predication; pg is narrowed from svbool_t to the 64-bit element predicate.
  // CHECK-LABEL: test_sveor_s64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s64,_m,)(pg, op1, op2);
}
128
svuint8_t test_sveor_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // Merging predication: the predicated eor intrinsic is applied to op1/op2 directly.
  // CHECK-LABEL: test_sveor_u8_m
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u8,_m,)(pg, op1, op2);
}
136
svuint16_t test_sveor_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // Merging predication; pg is narrowed from svbool_t to the 16-bit element predicate.
  // CHECK-LABEL: test_sveor_u16_m
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u16,_m,)(pg, op1, op2);
}
145
svuint32_t test_sveor_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // Merging predication; pg is narrowed from svbool_t to the 32-bit element predicate.
  // CHECK-LABEL: test_sveor_u32_m
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u32,_m,)(pg, op1, op2);
}
154
svuint64_t test_sveor_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // Merging predication; pg is narrowed from svbool_t to the 64-bit element predicate.
  // CHECK-LABEL: test_sveor_u64_m
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u64,_m,)(pg, op1, op2);
}
163
svint8_t test_sveor_s8_x(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // "Don't care" predication: lowers to the same predicated eor call as the _m form.
  // CHECK-LABEL: test_sveor_s8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s8,_x,)(pg, op1, op2);
}
171
svint16_t test_sveor_s16_x(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // "Don't care" predication: lowers to the same predicated eor call as the _m form.
  // CHECK-LABEL: test_sveor_s16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s16,_x,)(pg, op1, op2);
}
180
svint32_t test_sveor_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // "Don't care" predication: lowers to the same predicated eor call as the _m form.
  // CHECK-LABEL: test_sveor_s32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s32,_x,)(pg, op1, op2);
}
189
svint64_t test_sveor_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // "Don't care" predication: lowers to the same predicated eor call as the _m form.
  // CHECK-LABEL: test_sveor_s64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_s64,_x,)(pg, op1, op2);
}
198
svuint8_t test_sveor_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // "Don't care" predication: lowers to the same predicated eor call as the _m form.
  // CHECK-LABEL: test_sveor_u8_x
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u8,_x,)(pg, op1, op2);
}
206
svuint16_t test_sveor_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // "Don't care" predication: lowers to the same predicated eor call as the _m form.
  // CHECK-LABEL: test_sveor_u16_x
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u16,_x,)(pg, op1, op2);
}
215
svuint32_t test_sveor_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // "Don't care" predication: lowers to the same predicated eor call as the _m form.
  // CHECK-LABEL: test_sveor_u32_x
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u32,_x,)(pg, op1, op2);
}
224
svuint64_t test_sveor_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // "Don't care" predication: lowers to the same predicated eor call as the _m form.
  // CHECK-LABEL: test_sveor_u64_x
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_u64,_x,)(pg, op1, op2);
}
233
svint8_t test_sveor_n_s8_z(svbool_t pg, svint8_t op1, int8_t op2)
{
  // Scalar op2 is splatted with dup.x; op1 is zeroed in inactive lanes before the EOR.
  // NOTE: the SEL line was missing, so the later use of %[[SEL]] referenced an
  // undefined FileCheck variable; restored to match the other _n_*_z tests.
  // CHECK-LABEL: test_sveor_n_s8_z
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s8,_z,)(pg, op1, op2);
}
242
svint16_t test_sveor_n_s16_z(svbool_t pg, svint16_t op1, int16_t op2)
{
  // Scalar op2 is splatted with dup.x; op1 is zeroed in inactive lanes before the EOR.
  // CHECK-LABEL: test_sveor_n_s16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s16,_z,)(pg, op1, op2);
}
253
svint32_t test_sveor_n_s32_z(svbool_t pg, svint32_t op1, int32_t op2)
{
  // Scalar op2 is splatted with dup.x; op1 is zeroed in inactive lanes before the EOR.
  // CHECK-LABEL: test_sveor_n_s32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s32,_z,)(pg, op1, op2);
}
264
svint64_t test_sveor_n_s64_z(svbool_t pg, svint64_t op1, int64_t op2)
{
  // Scalar op2 is splatted with dup.x; op1 is zeroed in inactive lanes before the EOR.
  // CHECK-LABEL: test_sveor_n_s64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s64,_z,)(pg, op1, op2);
}
275
svuint8_t test_sveor_n_u8_z(svbool_t pg, svuint8_t op1, uint8_t op2)
{
  // Scalar op2 is splatted with dup.x; op1 is zeroed in inactive lanes before the EOR.
  // CHECK-LABEL: test_sveor_n_u8_z
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %[[SEL]], <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u8,_z,)(pg, op1, op2);
}
285
svuint16_t test_sveor_n_u16_z(svbool_t pg, svuint16_t op1, uint16_t op2)
{
  // Scalar op2 is splatted with dup.x; op1 is zeroed in inactive lanes before the EOR.
  // CHECK-LABEL: test_sveor_n_u16_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %[[SEL]], <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u16,_z,)(pg, op1, op2);
}
296
svuint32_t test_sveor_n_u32_z(svbool_t pg, svuint32_t op1, uint32_t op2)
{
  // Scalar op2 is splatted with dup.x; op1 is zeroed in inactive lanes before the EOR.
  // CHECK-LABEL: test_sveor_n_u32_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %[[SEL]], <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u32,_z,)(pg, op1, op2);
}
307
svuint64_t test_sveor_n_u64_z(svbool_t pg, svuint64_t op1, uint64_t op2)
{
  // Scalar op2 is splatted with dup.x; op1 is zeroed in inactive lanes before the EOR.
  // CHECK-LABEL: test_sveor_n_u64_z
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK-DAG: %[[SEL:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> zeroinitializer)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %[[SEL]], <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u64,_z,)(pg, op1, op2);
}
318
svint8_t test_sveor_n_s8_m(svbool_t pg, svint8_t op1, int8_t op2)
{
  // Scalar op2 is splatted with dup.x, then the merging predicated eor is applied.
  // CHECK-LABEL: test_sveor_n_s8_m
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s8,_m,)(pg, op1, op2);
}
327
svint16_t test_sveor_n_s16_m(svbool_t pg, svint16_t op1, int16_t op2)
{
  // Scalar op2 is splatted with dup.x, then the merging predicated eor is applied.
  // CHECK-LABEL: test_sveor_n_s16_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s16,_m,)(pg, op1, op2);
}
337
svint32_t test_sveor_n_s32_m(svbool_t pg, svint32_t op1, int32_t op2)
{
  // Scalar op2 is splatted with dup.x, then the merging predicated eor is applied.
  // CHECK-LABEL: test_sveor_n_s32_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s32,_m,)(pg, op1, op2);
}
347
svint64_t test_sveor_n_s64_m(svbool_t pg, svint64_t op1, int64_t op2)
{
  // Scalar op2 is splatted with dup.x, then the merging predicated eor is applied.
  // CHECK-LABEL: test_sveor_n_s64_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s64,_m,)(pg, op1, op2);
}
357
svuint8_t test_sveor_n_u8_m(svbool_t pg, svuint8_t op1, uint8_t op2)
{
  // Scalar op2 is splatted with dup.x, then the merging predicated eor is applied.
  // CHECK-LABEL: test_sveor_n_u8_m
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u8,_m,)(pg, op1, op2);
}
366
svuint16_t test_sveor_n_u16_m(svbool_t pg, svuint16_t op1, uint16_t op2)
{
  // Scalar op2 is splatted with dup.x, then the merging predicated eor is applied.
  // CHECK-LABEL: test_sveor_n_u16_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u16,_m,)(pg, op1, op2);
}
376
svuint32_t test_sveor_n_u32_m(svbool_t pg, svuint32_t op1, uint32_t op2)
{
  // Scalar op2 is splatted with dup.x, then the merging predicated eor is applied.
  // CHECK-LABEL: test_sveor_n_u32_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u32,_m,)(pg, op1, op2);
}
386
svuint64_t test_sveor_n_u64_m(svbool_t pg, svuint64_t op1, uint64_t op2)
{
  // Scalar op2 is splatted with dup.x, then the merging predicated eor is applied.
  // CHECK-LABEL: test_sveor_n_u64_m
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u64,_m,)(pg, op1, op2);
}
396
svint8_t test_sveor_n_s8_x(svbool_t pg, svint8_t op1, int8_t op2)
{
  // Scalar op2 is splatted with dup.x; lowers to the same predicated eor as the _m form.
  // CHECK-LABEL: test_sveor_n_s8_x
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s8,_x,)(pg, op1, op2);
}
405
svint16_t test_sveor_n_s16_x(svbool_t pg, svint16_t op1, int16_t op2)
{
  // Scalar op2 is splatted with dup.x; lowers to the same predicated eor as the _m form.
  // CHECK-LABEL: test_sveor_n_s16_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s16,_x,)(pg, op1, op2);
}
415
svint32_t test_sveor_n_s32_x(svbool_t pg, svint32_t op1, int32_t op2)
{
  // Scalar op2 is splatted with dup.x; lowers to the same predicated eor as the _m form.
  // CHECK-LABEL: test_sveor_n_s32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s32,_x,)(pg, op1, op2);
}
425
svint64_t test_sveor_n_s64_x(svbool_t pg, svint64_t op1, int64_t op2)
{
  // Scalar op2 is splatted with dup.x; lowers to the same predicated eor as the _m form.
  // CHECK-LABEL: test_sveor_n_s64_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_s64,_x,)(pg, op1, op2);
}
435
svuint8_t test_sveor_n_u8_x(svbool_t pg, svuint8_t op1, uint8_t op2)
{
  // Scalar op2 is splatted with dup.x; lowers to the same predicated eor as the _m form.
  // CHECK-LABEL: test_sveor_n_u8_x
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u8,_x,)(pg, op1, op2);
}
444
svuint16_t test_sveor_n_u16_x(svbool_t pg, svuint16_t op1, uint16_t op2)
{
  // Scalar op2 is splatted with dup.x; lowers to the same predicated eor as the _m form.
  // CHECK-LABEL: test_sveor_n_u16_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u16,_x,)(pg, op1, op2);
}
454
svuint32_t test_sveor_n_u32_x(svbool_t pg, svuint32_t op1, uint32_t op2)
{
  // Scalar op2 is splatted with dup.x; lowers to the same predicated eor as the _m form.
  // CHECK-LABEL: test_sveor_n_u32_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u32,_x,)(pg, op1, op2);
}
464
svuint64_t test_sveor_n_u64_x(svbool_t pg, svuint64_t op1, uint64_t op2)
{
  // Scalar op2 is splatted with dup.x; lowers to the same predicated eor as the _m form.
  // CHECK-LABEL: test_sveor_n_u64_x
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_n_u64,_x,)(pg, op1, op2);
}
474
svbool_t test_sveor_b_z(svbool_t pg, svbool_t op1, svbool_t op2)
{
  // Predicate (svbool_t) EOR: maps to the eor.z.nxv16i1 intrinsic.
  // CHECK-LABEL: test_sveor_b_z
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.eor.z.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %op1, <vscale x 16 x i1> %op2)
  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
  return SVE_ACLE_FUNC(sveor,_b,_z,)(pg, op1, op2);
}
482