// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
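
// These tests exercise the vredxor_vs_* (single-width integer XOR reduction)
// intrinsics for every element width (8/16/32/64-bit, signed and unsigned)
// and LMUL, in both unmasked and masked (_m) forms, and verify that each call
// lowers to the corresponding @llvm.riscv.vredxor(.mask) intrinsic.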
6 
7 //
8 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1(
9 // CHECK-RV64-NEXT:  entry:
10 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
11 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
12 //
test_vredxor_vs_i8mf8_i8m1(vint8m1_t dst,vint8mf8_t vector,vint8m1_t scalar,size_t vl)13 vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
14                                      vint8m1_t scalar, size_t vl) {
15   return vredxor_vs_i8mf8_i8m1(dst, vector, scalar, vl);
16 }
17 
18 //
19 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1(
20 // CHECK-RV64-NEXT:  entry:
21 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
22 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
23 //
test_vredxor_vs_i8mf4_i8m1(vint8m1_t dst,vint8mf4_t vector,vint8m1_t scalar,size_t vl)24 vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
25                                      vint8m1_t scalar, size_t vl) {
26   return vredxor_vs_i8mf4_i8m1(dst, vector, scalar, vl);
27 }
28 
29 //
30 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1(
31 // CHECK-RV64-NEXT:  entry:
32 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
33 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
34 //
test_vredxor_vs_i8mf2_i8m1(vint8m1_t dst,vint8mf2_t vector,vint8m1_t scalar,size_t vl)35 vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
36                                      vint8m1_t scalar, size_t vl) {
37   return vredxor_vs_i8mf2_i8m1(dst, vector, scalar, vl);
38 }
39 
40 //
41 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1(
42 // CHECK-RV64-NEXT:  entry:
43 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
44 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
45 //
test_vredxor_vs_i8m1_i8m1(vint8m1_t dst,vint8m1_t vector,vint8m1_t scalar,size_t vl)46 vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
47                                     vint8m1_t scalar, size_t vl) {
48   return vredxor_vs_i8m1_i8m1(dst, vector, scalar, vl);
49 }
50 
51 //
52 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1(
53 // CHECK-RV64-NEXT:  entry:
54 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
55 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
56 //
test_vredxor_vs_i8m2_i8m1(vint8m1_t dst,vint8m2_t vector,vint8m1_t scalar,size_t vl)57 vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
58                                     vint8m1_t scalar, size_t vl) {
59   return vredxor_vs_i8m2_i8m1(dst, vector, scalar, vl);
60 }
61 
62 //
63 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1(
64 // CHECK-RV64-NEXT:  entry:
65 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
66 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
67 //
test_vredxor_vs_i8m4_i8m1(vint8m1_t dst,vint8m4_t vector,vint8m1_t scalar,size_t vl)68 vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
69                                     vint8m1_t scalar, size_t vl) {
70   return vredxor_vs_i8m4_i8m1(dst, vector, scalar, vl);
71 }
72 
73 //
74 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1(
75 // CHECK-RV64-NEXT:  entry:
76 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
77 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
78 //
test_vredxor_vs_i8m8_i8m1(vint8m1_t dst,vint8m8_t vector,vint8m1_t scalar,size_t vl)79 vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
80                                     vint8m1_t scalar, size_t vl) {
81   return vredxor_vs_i8m8_i8m1(dst, vector, scalar, vl);
82 }
83 
84 //
85 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1(
86 // CHECK-RV64-NEXT:  entry:
87 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
88 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
89 //
test_vredxor_vs_i16mf4_i16m1(vint16m1_t dst,vint16mf4_t vector,vint16m1_t scalar,size_t vl)90 vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
91                                         vint16m1_t scalar, size_t vl) {
92   return vredxor_vs_i16mf4_i16m1(dst, vector, scalar, vl);
93 }
94 
95 //
96 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1(
97 // CHECK-RV64-NEXT:  entry:
98 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
99 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
100 //
test_vredxor_vs_i16mf2_i16m1(vint16m1_t dst,vint16mf2_t vector,vint16m1_t scalar,size_t vl)101 vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
102                                         vint16m1_t scalar, size_t vl) {
103   return vredxor_vs_i16mf2_i16m1(dst, vector, scalar, vl);
104 }
105 
106 //
107 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1(
108 // CHECK-RV64-NEXT:  entry:
109 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
110 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
111 //
test_vredxor_vs_i16m1_i16m1(vint16m1_t dst,vint16m1_t vector,vint16m1_t scalar,size_t vl)112 vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
113                                        vint16m1_t scalar, size_t vl) {
114   return vredxor_vs_i16m1_i16m1(dst, vector, scalar, vl);
115 }
116 
117 //
118 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1(
119 // CHECK-RV64-NEXT:  entry:
120 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
121 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
122 //
test_vredxor_vs_i16m2_i16m1(vint16m1_t dst,vint16m2_t vector,vint16m1_t scalar,size_t vl)123 vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
124                                        vint16m1_t scalar, size_t vl) {
125   return vredxor_vs_i16m2_i16m1(dst, vector, scalar, vl);
126 }
127 
128 //
129 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1(
130 // CHECK-RV64-NEXT:  entry:
131 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
132 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
133 //
test_vredxor_vs_i16m4_i16m1(vint16m1_t dst,vint16m4_t vector,vint16m1_t scalar,size_t vl)134 vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
135                                        vint16m1_t scalar, size_t vl) {
136   return vredxor_vs_i16m4_i16m1(dst, vector, scalar, vl);
137 }
138 
139 //
140 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1(
141 // CHECK-RV64-NEXT:  entry:
142 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
143 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
144 //
test_vredxor_vs_i16m8_i16m1(vint16m1_t dst,vint16m8_t vector,vint16m1_t scalar,size_t vl)145 vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
146                                        vint16m1_t scalar, size_t vl) {
147   return vredxor_vs_i16m8_i16m1(dst, vector, scalar, vl);
148 }
149 
150 //
151 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1(
152 // CHECK-RV64-NEXT:  entry:
153 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
154 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
155 //
test_vredxor_vs_i32mf2_i32m1(vint32m1_t dst,vint32mf2_t vector,vint32m1_t scalar,size_t vl)156 vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
157                                         vint32m1_t scalar, size_t vl) {
158   return vredxor_vs_i32mf2_i32m1(dst, vector, scalar, vl);
159 }
160 
161 //
162 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1(
163 // CHECK-RV64-NEXT:  entry:
164 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
165 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
166 //
test_vredxor_vs_i32m1_i32m1(vint32m1_t dst,vint32m1_t vector,vint32m1_t scalar,size_t vl)167 vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
168                                        vint32m1_t scalar, size_t vl) {
169   return vredxor_vs_i32m1_i32m1(dst, vector, scalar, vl);
170 }
171 
172 //
173 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1(
174 // CHECK-RV64-NEXT:  entry:
175 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
176 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
177 //
test_vredxor_vs_i32m2_i32m1(vint32m1_t dst,vint32m2_t vector,vint32m1_t scalar,size_t vl)178 vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
179                                        vint32m1_t scalar, size_t vl) {
180   return vredxor_vs_i32m2_i32m1(dst, vector, scalar, vl);
181 }
182 
183 //
184 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1(
185 // CHECK-RV64-NEXT:  entry:
186 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
187 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
188 //
test_vredxor_vs_i32m4_i32m1(vint32m1_t dst,vint32m4_t vector,vint32m1_t scalar,size_t vl)189 vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
190                                        vint32m1_t scalar, size_t vl) {
191   return vredxor_vs_i32m4_i32m1(dst, vector, scalar, vl);
192 }
193 
194 //
195 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1(
196 // CHECK-RV64-NEXT:  entry:
197 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
198 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
199 //
test_vredxor_vs_i32m8_i32m1(vint32m1_t dst,vint32m8_t vector,vint32m1_t scalar,size_t vl)200 vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
201                                        vint32m1_t scalar, size_t vl) {
202   return vredxor_vs_i32m8_i32m1(dst, vector, scalar, vl);
203 }
204 
205 //
206 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1(
207 // CHECK-RV64-NEXT:  entry:
208 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
209 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
210 //
test_vredxor_vs_i64m1_i64m1(vint64m1_t dst,vint64m1_t vector,vint64m1_t scalar,size_t vl)211 vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
212                                        vint64m1_t scalar, size_t vl) {
213   return vredxor_vs_i64m1_i64m1(dst, vector, scalar, vl);
214 }
215 
216 //
217 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1(
218 // CHECK-RV64-NEXT:  entry:
219 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
220 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
221 //
test_vredxor_vs_i64m2_i64m1(vint64m1_t dst,vint64m2_t vector,vint64m1_t scalar,size_t vl)222 vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
223                                        vint64m1_t scalar, size_t vl) {
224   return vredxor_vs_i64m2_i64m1(dst, vector, scalar, vl);
225 }
226 
227 //
228 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1(
229 // CHECK-RV64-NEXT:  entry:
230 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
231 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
232 //
test_vredxor_vs_i64m4_i64m1(vint64m1_t dst,vint64m4_t vector,vint64m1_t scalar,size_t vl)233 vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
234                                        vint64m1_t scalar, size_t vl) {
235   return vredxor_vs_i64m4_i64m1(dst, vector, scalar, vl);
236 }
237 
238 //
239 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1(
240 // CHECK-RV64-NEXT:  entry:
241 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
242 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
243 //
test_vredxor_vs_i64m8_i64m1(vint64m1_t dst,vint64m8_t vector,vint64m1_t scalar,size_t vl)244 vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
245                                        vint64m1_t scalar, size_t vl) {
246   return vredxor_vs_i64m8_i64m1(dst, vector, scalar, vl);
247 }
248 
249 //
250 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1(
251 // CHECK-RV64-NEXT:  entry:
252 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
253 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
254 //
test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dst,vuint8mf8_t vector,vuint8m1_t scalar,size_t vl)255 vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
256                                       vuint8m1_t scalar, size_t vl) {
257   return vredxor_vs_u8mf8_u8m1(dst, vector, scalar, vl);
258 }
259 
260 //
261 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1(
262 // CHECK-RV64-NEXT:  entry:
263 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
264 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
265 //
test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dst,vuint8mf4_t vector,vuint8m1_t scalar,size_t vl)266 vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
267                                       vuint8m1_t scalar, size_t vl) {
268   return vredxor_vs_u8mf4_u8m1(dst, vector, scalar, vl);
269 }
270 
271 //
272 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1(
273 // CHECK-RV64-NEXT:  entry:
274 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
275 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
276 //
test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dst,vuint8mf2_t vector,vuint8m1_t scalar,size_t vl)277 vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
278                                       vuint8m1_t scalar, size_t vl) {
279   return vredxor_vs_u8mf2_u8m1(dst, vector, scalar, vl);
280 }
281 
282 //
283 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1(
284 // CHECK-RV64-NEXT:  entry:
285 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
286 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
287 //
test_vredxor_vs_u8m1_u8m1(vuint8m1_t dst,vuint8m1_t vector,vuint8m1_t scalar,size_t vl)288 vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
289                                      vuint8m1_t scalar, size_t vl) {
290   return vredxor_vs_u8m1_u8m1(dst, vector, scalar, vl);
291 }
292 
293 //
294 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1(
295 // CHECK-RV64-NEXT:  entry:
296 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
297 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
298 //
test_vredxor_vs_u8m2_u8m1(vuint8m1_t dst,vuint8m2_t vector,vuint8m1_t scalar,size_t vl)299 vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
300                                      vuint8m1_t scalar, size_t vl) {
301   return vredxor_vs_u8m2_u8m1(dst, vector, scalar, vl);
302 }
303 
304 //
305 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1(
306 // CHECK-RV64-NEXT:  entry:
307 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
308 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
309 //
test_vredxor_vs_u8m4_u8m1(vuint8m1_t dst,vuint8m4_t vector,vuint8m1_t scalar,size_t vl)310 vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
311                                      vuint8m1_t scalar, size_t vl) {
312   return vredxor_vs_u8m4_u8m1(dst, vector, scalar, vl);
313 }
314 
315 //
316 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1(
317 // CHECK-RV64-NEXT:  entry:
318 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
319 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
320 //
test_vredxor_vs_u8m8_u8m1(vuint8m1_t dst,vuint8m8_t vector,vuint8m1_t scalar,size_t vl)321 vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
322                                      vuint8m1_t scalar, size_t vl) {
323   return vredxor_vs_u8m8_u8m1(dst, vector, scalar, vl);
324 }
325 
326 //
327 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1(
328 // CHECK-RV64-NEXT:  entry:
329 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
330 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
331 //
test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dst,vuint16mf4_t vector,vuint16m1_t scalar,size_t vl)332 vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
333                                          vuint16m1_t scalar, size_t vl) {
334   return vredxor_vs_u16mf4_u16m1(dst, vector, scalar, vl);
335 }
336 
337 //
338 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1(
339 // CHECK-RV64-NEXT:  entry:
340 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
341 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
342 //
test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dst,vuint16mf2_t vector,vuint16m1_t scalar,size_t vl)343 vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
344                                          vuint16m1_t scalar, size_t vl) {
345   return vredxor_vs_u16mf2_u16m1(dst, vector, scalar, vl);
346 }
347 
348 //
349 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1(
350 // CHECK-RV64-NEXT:  entry:
351 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
352 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
353 //
test_vredxor_vs_u16m1_u16m1(vuint16m1_t dst,vuint16m1_t vector,vuint16m1_t scalar,size_t vl)354 vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
355                                         vuint16m1_t scalar, size_t vl) {
356   return vredxor_vs_u16m1_u16m1(dst, vector, scalar, vl);
357 }
358 
359 //
360 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1(
361 // CHECK-RV64-NEXT:  entry:
362 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
363 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
364 //
test_vredxor_vs_u16m2_u16m1(vuint16m1_t dst,vuint16m2_t vector,vuint16m1_t scalar,size_t vl)365 vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
366                                         vuint16m1_t scalar, size_t vl) {
367   return vredxor_vs_u16m2_u16m1(dst, vector, scalar, vl);
368 }
369 
370 //
371 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1(
372 // CHECK-RV64-NEXT:  entry:
373 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
374 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
375 //
test_vredxor_vs_u16m4_u16m1(vuint16m1_t dst,vuint16m4_t vector,vuint16m1_t scalar,size_t vl)376 vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
377                                         vuint16m1_t scalar, size_t vl) {
378   return vredxor_vs_u16m4_u16m1(dst, vector, scalar, vl);
379 }
380 
381 //
382 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1(
383 // CHECK-RV64-NEXT:  entry:
384 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
385 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
386 //
test_vredxor_vs_u16m8_u16m1(vuint16m1_t dst,vuint16m8_t vector,vuint16m1_t scalar,size_t vl)387 vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
388                                         vuint16m1_t scalar, size_t vl) {
389   return vredxor_vs_u16m8_u16m1(dst, vector, scalar, vl);
390 }
391 
392 //
393 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1(
394 // CHECK-RV64-NEXT:  entry:
395 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
396 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
397 //
test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dst,vuint32mf2_t vector,vuint32m1_t scalar,size_t vl)398 vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
399                                          vuint32m1_t scalar, size_t vl) {
400   return vredxor_vs_u32mf2_u32m1(dst, vector, scalar, vl);
401 }
402 
403 //
404 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1(
405 // CHECK-RV64-NEXT:  entry:
406 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
407 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
408 //
test_vredxor_vs_u32m1_u32m1(vuint32m1_t dst,vuint32m1_t vector,vuint32m1_t scalar,size_t vl)409 vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
410                                         vuint32m1_t scalar, size_t vl) {
411   return vredxor_vs_u32m1_u32m1(dst, vector, scalar, vl);
412 }
413 
414 //
415 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1(
416 // CHECK-RV64-NEXT:  entry:
417 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
418 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
419 //
test_vredxor_vs_u32m2_u32m1(vuint32m1_t dst,vuint32m2_t vector,vuint32m1_t scalar,size_t vl)420 vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
421                                         vuint32m1_t scalar, size_t vl) {
422   return vredxor_vs_u32m2_u32m1(dst, vector, scalar, vl);
423 }
424 
425 //
426 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1(
427 // CHECK-RV64-NEXT:  entry:
428 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
429 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
430 //
test_vredxor_vs_u32m4_u32m1(vuint32m1_t dst,vuint32m4_t vector,vuint32m1_t scalar,size_t vl)431 vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
432                                         vuint32m1_t scalar, size_t vl) {
433   return vredxor_vs_u32m4_u32m1(dst, vector, scalar, vl);
434 }
435 
436 //
437 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1(
438 // CHECK-RV64-NEXT:  entry:
439 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
440 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
441 //
test_vredxor_vs_u32m8_u32m1(vuint32m1_t dst,vuint32m8_t vector,vuint32m1_t scalar,size_t vl)442 vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
443                                         vuint32m1_t scalar, size_t vl) {
444   return vredxor_vs_u32m8_u32m1(dst, vector, scalar, vl);
445 }
446 
447 //
448 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1(
449 // CHECK-RV64-NEXT:  entry:
450 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
451 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
452 //
test_vredxor_vs_u64m1_u64m1(vuint64m1_t dst,vuint64m1_t vector,vuint64m1_t scalar,size_t vl)453 vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
454                                         vuint64m1_t scalar, size_t vl) {
455   return vredxor_vs_u64m1_u64m1(dst, vector, scalar, vl);
456 }
457 
458 //
459 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1(
460 // CHECK-RV64-NEXT:  entry:
461 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
462 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
463 //
test_vredxor_vs_u64m2_u64m1(vuint64m1_t dst,vuint64m2_t vector,vuint64m1_t scalar,size_t vl)464 vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
465                                         vuint64m1_t scalar, size_t vl) {
466   return vredxor_vs_u64m2_u64m1(dst, vector, scalar, vl);
467 }
468 
469 //
470 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1(
471 // CHECK-RV64-NEXT:  entry:
472 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
473 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
474 //
test_vredxor_vs_u64m4_u64m1(vuint64m1_t dst,vuint64m4_t vector,vuint64m1_t scalar,size_t vl)475 vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
476                                         vuint64m1_t scalar, size_t vl) {
477   return vredxor_vs_u64m4_u64m1(dst, vector, scalar, vl);
478 }
479 
480 //
481 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1(
482 // CHECK-RV64-NEXT:  entry:
483 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
484 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
485 //
test_vredxor_vs_u64m8_u64m1(vuint64m1_t dst,vuint64m8_t vector,vuint64m1_t scalar,size_t vl)486 vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
487                                         vuint64m1_t scalar, size_t vl) {
488   return vredxor_vs_u64m8_u64m1(dst, vector, scalar, vl);
489 }
490 
491 //
492 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
493 // CHECK-RV64-NEXT:  entry:
494 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
495 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
496 //
test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask,vint8m1_t dst,vint8mf8_t vector,vint8m1_t scalar,size_t vl)497 vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
498                                        vint8mf8_t vector, vint8m1_t scalar,
499                                        size_t vl) {
500   return vredxor_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
501 }
502 
503 //
504 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m(
505 // CHECK-RV64-NEXT:  entry:
506 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
507 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
508 //
test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask,vint8m1_t dst,vint8mf4_t vector,vint8m1_t scalar,size_t vl)509 vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
510                                        vint8mf4_t vector, vint8m1_t scalar,
511                                        size_t vl) {
512   return vredxor_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
513 }
514 
515 //
516 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m(
517 // CHECK-RV64-NEXT:  entry:
518 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
519 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
520 //
test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask,vint8m1_t dst,vint8mf2_t vector,vint8m1_t scalar,size_t vl)521 vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
522                                        vint8mf2_t vector, vint8m1_t scalar,
523                                        size_t vl) {
524   return vredxor_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
525 }
526 
527 //
528 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m(
529 // CHECK-RV64-NEXT:  entry:
530 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
531 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
532 //
test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask,vint8m1_t dst,vint8m1_t vector,vint8m1_t scalar,size_t vl)533 vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
534                                       vint8m1_t vector, vint8m1_t scalar,
535                                       size_t vl) {
536   return vredxor_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
537 }
538 
539 //
540 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m(
541 // CHECK-RV64-NEXT:  entry:
542 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
543 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
544 //
test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask,vint8m1_t dst,vint8m2_t vector,vint8m1_t scalar,size_t vl)545 vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
546                                       vint8m2_t vector, vint8m1_t scalar,
547                                       size_t vl) {
548   return vredxor_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
549 }
550 
551 //
552 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m(
553 // CHECK-RV64-NEXT:  entry:
554 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
555 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
556 //
test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask,vint8m1_t dst,vint8m4_t vector,vint8m1_t scalar,size_t vl)557 vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
558                                       vint8m4_t vector, vint8m1_t scalar,
559                                       size_t vl) {
560   return vredxor_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
561 }
562 
563 //
564 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m(
565 // CHECK-RV64-NEXT:  entry:
566 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
567 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
568 //
test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask,vint8m1_t dst,vint8m8_t vector,vint8m1_t scalar,size_t vl)569 vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
570                                       vint8m8_t vector, vint8m1_t scalar,
571                                       size_t vl) {
572   return vredxor_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
573 }
574 
575 //
576 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m(
577 // CHECK-RV64-NEXT:  entry:
578 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
579 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
580 //
test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask,vint16m1_t dst,vint16mf4_t vector,vint16m1_t scalar,size_t vl)581 vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
582                                           vint16mf4_t vector, vint16m1_t scalar,
583                                           size_t vl) {
584   return vredxor_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
585 }
586 
587 //
588 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m(
589 // CHECK-RV64-NEXT:  entry:
590 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
591 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
592 //
test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask,vint16m1_t dst,vint16mf2_t vector,vint16m1_t scalar,size_t vl)593 vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
594                                           vint16mf2_t vector, vint16m1_t scalar,
595                                           size_t vl) {
596   return vredxor_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
597 }
598 
599 //
600 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m(
601 // CHECK-RV64-NEXT:  entry:
602 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
603 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
604 //
test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask,vint16m1_t dst,vint16m1_t vector,vint16m1_t scalar,size_t vl)605 vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
606                                          vint16m1_t vector, vint16m1_t scalar,
607                                          size_t vl) {
608   return vredxor_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
609 }
610 
611 //
612 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m(
613 // CHECK-RV64-NEXT:  entry:
614 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
615 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
616 //
test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask,vint16m1_t dst,vint16m2_t vector,vint16m1_t scalar,size_t vl)617 vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
618                                          vint16m2_t vector, vint16m1_t scalar,
619                                          size_t vl) {
620   return vredxor_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
621 }
622 
623 //
624 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m(
625 // CHECK-RV64-NEXT:  entry:
626 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
627 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
628 //
test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask,vint16m1_t dst,vint16m4_t vector,vint16m1_t scalar,size_t vl)629 vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
630                                          vint16m4_t vector, vint16m1_t scalar,
631                                          size_t vl) {
632   return vredxor_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
633 }
634 
635 //
636 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
637 // CHECK-RV64-NEXT:  entry:
638 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
639 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
640 //
test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask,vint16m1_t dst,vint16m8_t vector,vint16m1_t scalar,size_t vl)641 vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
642                                          vint16m8_t vector, vint16m1_t scalar,
643                                          size_t vl) {
644   return vredxor_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
645 }
646 
647 //
648 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
649 // CHECK-RV64-NEXT:  entry:
650 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
651 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
652 //
test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask,vint32m1_t dst,vint32mf2_t vector,vint32m1_t scalar,size_t vl)653 vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
654                                           vint32mf2_t vector, vint32m1_t scalar,
655                                           size_t vl) {
656   return vredxor_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
657 }
658 
659 //
660 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
661 // CHECK-RV64-NEXT:  entry:
662 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
663 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
664 //
test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask,vint32m1_t dst,vint32m1_t vector,vint32m1_t scalar,size_t vl)665 vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
666                                          vint32m1_t vector, vint32m1_t scalar,
667                                          size_t vl) {
668   return vredxor_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
669 }
670 
671 //
672 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
673 // CHECK-RV64-NEXT:  entry:
674 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
675 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
676 //
test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask,vint32m1_t dst,vint32m2_t vector,vint32m1_t scalar,size_t vl)677 vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
678                                          vint32m2_t vector, vint32m1_t scalar,
679                                          size_t vl) {
680   return vredxor_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
681 }
682 
683 //
684 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
685 // CHECK-RV64-NEXT:  entry:
686 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
687 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
688 //
test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask,vint32m1_t dst,vint32m4_t vector,vint32m1_t scalar,size_t vl)689 vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
690                                          vint32m4_t vector, vint32m1_t scalar,
691                                          size_t vl) {
692   return vredxor_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
693 }
694 
695 //
696 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
697 // CHECK-RV64-NEXT:  entry:
698 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
699 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
700 //
test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask,vint32m1_t dst,vint32m8_t vector,vint32m1_t scalar,size_t vl)701 vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
702                                          vint32m8_t vector, vint32m1_t scalar,
703                                          size_t vl) {
704   return vredxor_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
705 }
706 
707 //
708 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
709 // CHECK-RV64-NEXT:  entry:
710 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
711 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
712 //
test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask,vint64m1_t dst,vint64m1_t vector,vint64m1_t scalar,size_t vl)713 vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
714                                          vint64m1_t vector, vint64m1_t scalar,
715                                          size_t vl) {
716   return vredxor_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
717 }
718 
719 //
720 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
721 // CHECK-RV64-NEXT:  entry:
722 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
723 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
724 //
test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask,vint64m1_t dst,vint64m2_t vector,vint64m1_t scalar,size_t vl)725 vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
726                                          vint64m2_t vector, vint64m1_t scalar,
727                                          size_t vl) {
728   return vredxor_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
729 }
730 
731 //
732 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
733 // CHECK-RV64-NEXT:  entry:
734 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
735 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
736 //
test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask,vint64m1_t dst,vint64m4_t vector,vint64m1_t scalar,size_t vl)737 vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
738                                          vint64m4_t vector, vint64m1_t scalar,
739                                          size_t vl) {
740   return vredxor_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
741 }
742 
743 //
744 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
745 // CHECK-RV64-NEXT:  entry:
746 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
747 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
748 //
test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask,vint64m1_t dst,vint64m8_t vector,vint64m1_t scalar,size_t vl)749 vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
750                                          vint64m8_t vector, vint64m1_t scalar,
751                                          size_t vl) {
752   return vredxor_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
753 }
754 
755 //
756 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
757 // CHECK-RV64-NEXT:  entry:
758 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
759 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
760 //
test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask,vuint8m1_t dst,vuint8mf8_t vector,vuint8m1_t scalar,size_t vl)761 vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
762                                         vuint8mf8_t vector, vuint8m1_t scalar,
763                                         size_t vl) {
764   return vredxor_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
765 }
766 
767 //
768 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
769 // CHECK-RV64-NEXT:  entry:
770 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
771 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
772 //
test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask,vuint8m1_t dst,vuint8mf4_t vector,vuint8m1_t scalar,size_t vl)773 vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
774                                         vuint8mf4_t vector, vuint8m1_t scalar,
775                                         size_t vl) {
776   return vredxor_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
777 }
778 
779 //
780 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
781 // CHECK-RV64-NEXT:  entry:
782 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
783 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
784 //
test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask,vuint8m1_t dst,vuint8mf2_t vector,vuint8m1_t scalar,size_t vl)785 vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
786                                         vuint8mf2_t vector, vuint8m1_t scalar,
787                                         size_t vl) {
788   return vredxor_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
789 }
790 
791 //
792 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
793 // CHECK-RV64-NEXT:  entry:
794 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
795 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
796 //
test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask,vuint8m1_t dst,vuint8m1_t vector,vuint8m1_t scalar,size_t vl)797 vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
798                                        vuint8m1_t vector, vuint8m1_t scalar,
799                                        size_t vl) {
800   return vredxor_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
801 }
802 
803 //
804 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
805 // CHECK-RV64-NEXT:  entry:
806 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
807 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
808 //
test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask,vuint8m1_t dst,vuint8m2_t vector,vuint8m1_t scalar,size_t vl)809 vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
810                                        vuint8m2_t vector, vuint8m1_t scalar,
811                                        size_t vl) {
812   return vredxor_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
813 }
814 
815 //
816 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
817 // CHECK-RV64-NEXT:  entry:
818 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
819 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
820 //
test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask,vuint8m1_t dst,vuint8m4_t vector,vuint8m1_t scalar,size_t vl)821 vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
822                                        vuint8m4_t vector, vuint8m1_t scalar,
823                                        size_t vl) {
824   return vredxor_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
825 }
826 
827 //
828 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
829 // CHECK-RV64-NEXT:  entry:
830 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
831 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
832 //
test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask,vuint8m1_t dst,vuint8m8_t vector,vuint8m1_t scalar,size_t vl)833 vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
834                                        vuint8m8_t vector, vuint8m1_t scalar,
835                                        size_t vl) {
836   return vredxor_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
837 }
838 
839 //
840 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
841 // CHECK-RV64-NEXT:  entry:
842 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
843 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
844 //
test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask,vuint16m1_t dst,vuint16mf4_t vector,vuint16m1_t scalar,size_t vl)845 vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
846                                            vuint16mf4_t vector,
847                                            vuint16m1_t scalar, size_t vl) {
848   return vredxor_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
849 }
850 
851 //
852 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
853 // CHECK-RV64-NEXT:  entry:
854 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
855 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
856 //
test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask,vuint16m1_t dst,vuint16mf2_t vector,vuint16m1_t scalar,size_t vl)857 vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
858                                            vuint16mf2_t vector,
859                                            vuint16m1_t scalar, size_t vl) {
860   return vredxor_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
861 }
862 
//
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
                                          vuint16m1_t vector,
                                          vuint16m1_t scalar, size_t vl) {
  return vredxor_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
                                          vuint16m2_t vector,
                                          vuint16m1_t scalar, size_t vl) {
  return vredxor_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
                                          vuint16m4_t vector,
                                          vuint16m1_t scalar, size_t vl) {
  return vredxor_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
                                          vuint16m8_t vector,
                                          vuint16m1_t scalar, size_t vl) {
  return vredxor_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
                                           vuint32mf2_t vector,
                                           vuint32m1_t scalar, size_t vl) {
  return vredxor_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
                                          vuint32m1_t vector,
                                          vuint32m1_t scalar, size_t vl) {
  return vredxor_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
                                          vuint32m2_t vector,
                                          vuint32m1_t scalar, size_t vl) {
  return vredxor_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
                                          vuint32m4_t vector,
                                          vuint32m1_t scalar, size_t vl) {
  return vredxor_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
                                          vuint32m8_t vector,
                                          vuint32m1_t scalar, size_t vl) {
  return vredxor_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
                                          vuint64m1_t vector,
                                          vuint64m1_t scalar, size_t vl) {
  return vredxor_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
                                          vuint64m2_t vector,
                                          vuint64m1_t scalar, size_t vl) {
  return vredxor_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
                                          vuint64m4_t vector,
                                          vuint64m1_t scalar, size_t vl) {
  return vredxor_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
                                          vuint64m8_t vector,
                                          vuint64m1_t scalar, size_t vl) {
  return vredxor_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
}