1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
4 
5 #include <riscv_vector.h>
6 
7 // CHECK-RV64-LABEL: @test_vand_vv_i8mf8(
8 // CHECK-RV64-NEXT:  entry:
9 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
10 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
11 //
test_vand_vv_i8mf8(vint8mf8_t op1,vint8mf8_t op2,size_t vl)12 vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
13   return vand(op1, op2, vl);
14 }
15 
16 // CHECK-RV64-LABEL: @test_vand_vx_i8mf8(
17 // CHECK-RV64-NEXT:  entry:
18 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
19 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
20 //
test_vand_vx_i8mf8(vint8mf8_t op1,int8_t op2,size_t vl)21 vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
22   return vand(op1, op2, vl);
23 }
24 
25 // CHECK-RV64-LABEL: @test_vand_vv_i8mf4(
26 // CHECK-RV64-NEXT:  entry:
27 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
28 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
29 //
test_vand_vv_i8mf4(vint8mf4_t op1,vint8mf4_t op2,size_t vl)30 vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
31   return vand(op1, op2, vl);
32 }
33 
34 // CHECK-RV64-LABEL: @test_vand_vx_i8mf4(
35 // CHECK-RV64-NEXT:  entry:
36 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
37 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
38 //
test_vand_vx_i8mf4(vint8mf4_t op1,int8_t op2,size_t vl)39 vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
40   return vand(op1, op2, vl);
41 }
42 
43 // CHECK-RV64-LABEL: @test_vand_vv_i8mf2(
44 // CHECK-RV64-NEXT:  entry:
45 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
46 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
47 //
test_vand_vv_i8mf2(vint8mf2_t op1,vint8mf2_t op2,size_t vl)48 vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
49   return vand(op1, op2, vl);
50 }
51 
52 // CHECK-RV64-LABEL: @test_vand_vx_i8mf2(
53 // CHECK-RV64-NEXT:  entry:
54 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
55 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
56 //
test_vand_vx_i8mf2(vint8mf2_t op1,int8_t op2,size_t vl)57 vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
58   return vand(op1, op2, vl);
59 }
60 
61 // CHECK-RV64-LABEL: @test_vand_vv_i8m1(
62 // CHECK-RV64-NEXT:  entry:
63 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
64 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
65 //
test_vand_vv_i8m1(vint8m1_t op1,vint8m1_t op2,size_t vl)66 vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
67   return vand(op1, op2, vl);
68 }
69 
70 // CHECK-RV64-LABEL: @test_vand_vx_i8m1(
71 // CHECK-RV64-NEXT:  entry:
72 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
73 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
74 //
test_vand_vx_i8m1(vint8m1_t op1,int8_t op2,size_t vl)75 vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
76   return vand(op1, op2, vl);
77 }
78 
79 // CHECK-RV64-LABEL: @test_vand_vv_i8m2(
80 // CHECK-RV64-NEXT:  entry:
81 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
82 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
83 //
test_vand_vv_i8m2(vint8m2_t op1,vint8m2_t op2,size_t vl)84 vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
85   return vand(op1, op2, vl);
86 }
87 
88 // CHECK-RV64-LABEL: @test_vand_vx_i8m2(
89 // CHECK-RV64-NEXT:  entry:
90 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
91 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
92 //
test_vand_vx_i8m2(vint8m2_t op1,int8_t op2,size_t vl)93 vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
94   return vand(op1, op2, vl);
95 }
96 
97 // CHECK-RV64-LABEL: @test_vand_vv_i8m4(
98 // CHECK-RV64-NEXT:  entry:
99 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
100 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
101 //
test_vand_vv_i8m4(vint8m4_t op1,vint8m4_t op2,size_t vl)102 vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
103   return vand(op1, op2, vl);
104 }
105 
106 // CHECK-RV64-LABEL: @test_vand_vx_i8m4(
107 // CHECK-RV64-NEXT:  entry:
108 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
109 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
110 //
test_vand_vx_i8m4(vint8m4_t op1,int8_t op2,size_t vl)111 vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
112   return vand(op1, op2, vl);
113 }
114 
115 // CHECK-RV64-LABEL: @test_vand_vv_i8m8(
116 // CHECK-RV64-NEXT:  entry:
117 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
118 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
119 //
test_vand_vv_i8m8(vint8m8_t op1,vint8m8_t op2,size_t vl)120 vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
121   return vand(op1, op2, vl);
122 }
123 
124 // CHECK-RV64-LABEL: @test_vand_vx_i8m8(
125 // CHECK-RV64-NEXT:  entry:
126 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
127 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
128 //
test_vand_vx_i8m8(vint8m8_t op1,int8_t op2,size_t vl)129 vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
130   return vand(op1, op2, vl);
131 }
132 
133 // CHECK-RV64-LABEL: @test_vand_vv_i16mf4(
134 // CHECK-RV64-NEXT:  entry:
135 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
136 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
137 //
test_vand_vv_i16mf4(vint16mf4_t op1,vint16mf4_t op2,size_t vl)138 vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
139   return vand(op1, op2, vl);
140 }
141 
142 // CHECK-RV64-LABEL: @test_vand_vx_i16mf4(
143 // CHECK-RV64-NEXT:  entry:
144 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
145 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
146 //
test_vand_vx_i16mf4(vint16mf4_t op1,int16_t op2,size_t vl)147 vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
148   return vand(op1, op2, vl);
149 }
150 
151 // CHECK-RV64-LABEL: @test_vand_vv_i16mf2(
152 // CHECK-RV64-NEXT:  entry:
153 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
154 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
155 //
test_vand_vv_i16mf2(vint16mf2_t op1,vint16mf2_t op2,size_t vl)156 vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
157   return vand(op1, op2, vl);
158 }
159 
160 // CHECK-RV64-LABEL: @test_vand_vx_i16mf2(
161 // CHECK-RV64-NEXT:  entry:
162 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
163 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
164 //
test_vand_vx_i16mf2(vint16mf2_t op1,int16_t op2,size_t vl)165 vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
166   return vand(op1, op2, vl);
167 }
168 
169 // CHECK-RV64-LABEL: @test_vand_vv_i16m1(
170 // CHECK-RV64-NEXT:  entry:
171 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
172 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
173 //
test_vand_vv_i16m1(vint16m1_t op1,vint16m1_t op2,size_t vl)174 vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
175   return vand(op1, op2, vl);
176 }
177 
178 // CHECK-RV64-LABEL: @test_vand_vx_i16m1(
179 // CHECK-RV64-NEXT:  entry:
180 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
181 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
182 //
test_vand_vx_i16m1(vint16m1_t op1,int16_t op2,size_t vl)183 vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
184   return vand(op1, op2, vl);
185 }
186 
187 // CHECK-RV64-LABEL: @test_vand_vv_i16m2(
188 // CHECK-RV64-NEXT:  entry:
189 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
190 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
191 //
test_vand_vv_i16m2(vint16m2_t op1,vint16m2_t op2,size_t vl)192 vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
193   return vand(op1, op2, vl);
194 }
195 
196 // CHECK-RV64-LABEL: @test_vand_vx_i16m2(
197 // CHECK-RV64-NEXT:  entry:
198 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
199 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
200 //
test_vand_vx_i16m2(vint16m2_t op1,int16_t op2,size_t vl)201 vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
202   return vand(op1, op2, vl);
203 }
204 
205 // CHECK-RV64-LABEL: @test_vand_vv_i16m4(
206 // CHECK-RV64-NEXT:  entry:
207 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
208 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
209 //
test_vand_vv_i16m4(vint16m4_t op1,vint16m4_t op2,size_t vl)210 vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
211   return vand(op1, op2, vl);
212 }
213 
214 // CHECK-RV64-LABEL: @test_vand_vx_i16m4(
215 // CHECK-RV64-NEXT:  entry:
216 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
217 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
218 //
test_vand_vx_i16m4(vint16m4_t op1,int16_t op2,size_t vl)219 vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
220   return vand(op1, op2, vl);
221 }
222 
223 // CHECK-RV64-LABEL: @test_vand_vv_i16m8(
224 // CHECK-RV64-NEXT:  entry:
225 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
226 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
227 //
test_vand_vv_i16m8(vint16m8_t op1,vint16m8_t op2,size_t vl)228 vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
229   return vand(op1, op2, vl);
230 }
231 
232 // CHECK-RV64-LABEL: @test_vand_vx_i16m8(
233 // CHECK-RV64-NEXT:  entry:
234 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
235 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
236 //
test_vand_vx_i16m8(vint16m8_t op1,int16_t op2,size_t vl)237 vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
238   return vand(op1, op2, vl);
239 }
240 
241 // CHECK-RV64-LABEL: @test_vand_vv_i32mf2(
242 // CHECK-RV64-NEXT:  entry:
243 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
244 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
245 //
test_vand_vv_i32mf2(vint32mf2_t op1,vint32mf2_t op2,size_t vl)246 vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
247   return vand(op1, op2, vl);
248 }
249 
250 // CHECK-RV64-LABEL: @test_vand_vx_i32mf2(
251 // CHECK-RV64-NEXT:  entry:
252 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
253 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
254 //
test_vand_vx_i32mf2(vint32mf2_t op1,int32_t op2,size_t vl)255 vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
256   return vand(op1, op2, vl);
257 }
258 
259 // CHECK-RV64-LABEL: @test_vand_vv_i32m1(
260 // CHECK-RV64-NEXT:  entry:
261 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
262 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
263 //
test_vand_vv_i32m1(vint32m1_t op1,vint32m1_t op2,size_t vl)264 vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
265   return vand(op1, op2, vl);
266 }
267 
268 // CHECK-RV64-LABEL: @test_vand_vx_i32m1(
269 // CHECK-RV64-NEXT:  entry:
270 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
271 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
272 //
test_vand_vx_i32m1(vint32m1_t op1,int32_t op2,size_t vl)273 vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
274   return vand(op1, op2, vl);
275 }
276 
277 // CHECK-RV64-LABEL: @test_vand_vv_i32m2(
278 // CHECK-RV64-NEXT:  entry:
279 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
280 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
281 //
test_vand_vv_i32m2(vint32m2_t op1,vint32m2_t op2,size_t vl)282 vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
283   return vand(op1, op2, vl);
284 }
285 
286 // CHECK-RV64-LABEL: @test_vand_vx_i32m2(
287 // CHECK-RV64-NEXT:  entry:
288 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
289 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
290 //
test_vand_vx_i32m2(vint32m2_t op1,int32_t op2,size_t vl)291 vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
292   return vand(op1, op2, vl);
293 }
294 
295 // CHECK-RV64-LABEL: @test_vand_vv_i32m4(
296 // CHECK-RV64-NEXT:  entry:
297 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
298 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
299 //
test_vand_vv_i32m4(vint32m4_t op1,vint32m4_t op2,size_t vl)300 vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
301   return vand(op1, op2, vl);
302 }
303 
304 // CHECK-RV64-LABEL: @test_vand_vx_i32m4(
305 // CHECK-RV64-NEXT:  entry:
306 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
307 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
308 //
test_vand_vx_i32m4(vint32m4_t op1,int32_t op2,size_t vl)309 vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
310   return vand(op1, op2, vl);
311 }
312 
313 // CHECK-RV64-LABEL: @test_vand_vv_i32m8(
314 // CHECK-RV64-NEXT:  entry:
315 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
316 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
317 //
test_vand_vv_i32m8(vint32m8_t op1,vint32m8_t op2,size_t vl)318 vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
319   return vand(op1, op2, vl);
320 }
321 
322 // CHECK-RV64-LABEL: @test_vand_vx_i32m8(
323 // CHECK-RV64-NEXT:  entry:
324 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
325 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
326 //
test_vand_vx_i32m8(vint32m8_t op1,int32_t op2,size_t vl)327 vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
328   return vand(op1, op2, vl);
329 }
330 
331 // CHECK-RV64-LABEL: @test_vand_vv_i64m1(
332 // CHECK-RV64-NEXT:  entry:
333 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
334 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
335 //
test_vand_vv_i64m1(vint64m1_t op1,vint64m1_t op2,size_t vl)336 vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
337   return vand(op1, op2, vl);
338 }
339 
340 // CHECK-RV64-LABEL: @test_vand_vx_i64m1(
341 // CHECK-RV64-NEXT:  entry:
342 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
343 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
344 //
test_vand_vx_i64m1(vint64m1_t op1,int64_t op2,size_t vl)345 vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
346   return vand(op1, op2, vl);
347 }
348 
349 // CHECK-RV64-LABEL: @test_vand_vv_i64m2(
350 // CHECK-RV64-NEXT:  entry:
351 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
352 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
353 //
test_vand_vv_i64m2(vint64m2_t op1,vint64m2_t op2,size_t vl)354 vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
355   return vand(op1, op2, vl);
356 }
357 
358 // CHECK-RV64-LABEL: @test_vand_vx_i64m2(
359 // CHECK-RV64-NEXT:  entry:
360 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
361 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
362 //
test_vand_vx_i64m2(vint64m2_t op1,int64_t op2,size_t vl)363 vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
364   return vand(op1, op2, vl);
365 }
366 
367 // CHECK-RV64-LABEL: @test_vand_vv_i64m4(
368 // CHECK-RV64-NEXT:  entry:
369 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
370 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
371 //
test_vand_vv_i64m4(vint64m4_t op1,vint64m4_t op2,size_t vl)372 vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
373   return vand(op1, op2, vl);
374 }
375 
376 // CHECK-RV64-LABEL: @test_vand_vx_i64m4(
377 // CHECK-RV64-NEXT:  entry:
378 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
379 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
380 //
test_vand_vx_i64m4(vint64m4_t op1,int64_t op2,size_t vl)381 vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
382   return vand(op1, op2, vl);
383 }
384 
385 // CHECK-RV64-LABEL: @test_vand_vv_i64m8(
386 // CHECK-RV64-NEXT:  entry:
387 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
388 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
389 //
test_vand_vv_i64m8(vint64m8_t op1,vint64m8_t op2,size_t vl)390 vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
391   return vand(op1, op2, vl);
392 }
393 
394 // CHECK-RV64-LABEL: @test_vand_vx_i64m8(
395 // CHECK-RV64-NEXT:  entry:
396 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
397 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
398 //
test_vand_vx_i64m8(vint64m8_t op1,int64_t op2,size_t vl)399 vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
400   return vand(op1, op2, vl);
401 }
402 
403 // CHECK-RV64-LABEL: @test_vand_vv_u8mf8(
404 // CHECK-RV64-NEXT:  entry:
405 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
406 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
407 //
test_vand_vv_u8mf8(vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)408 vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
409   return vand(op1, op2, vl);
410 }
411 
412 // CHECK-RV64-LABEL: @test_vand_vx_u8mf8(
413 // CHECK-RV64-NEXT:  entry:
414 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
415 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
416 //
test_vand_vx_u8mf8(vuint8mf8_t op1,uint8_t op2,size_t vl)417 vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
418   return vand(op1, op2, vl);
419 }
420 
421 // CHECK-RV64-LABEL: @test_vand_vv_u8mf4(
422 // CHECK-RV64-NEXT:  entry:
423 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
424 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
425 //
test_vand_vv_u8mf4(vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)426 vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
427   return vand(op1, op2, vl);
428 }
429 
430 // CHECK-RV64-LABEL: @test_vand_vx_u8mf4(
431 // CHECK-RV64-NEXT:  entry:
432 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
433 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
434 //
test_vand_vx_u8mf4(vuint8mf4_t op1,uint8_t op2,size_t vl)435 vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
436   return vand(op1, op2, vl);
437 }
438 
439 // CHECK-RV64-LABEL: @test_vand_vv_u8mf2(
440 // CHECK-RV64-NEXT:  entry:
441 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
442 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
443 //
test_vand_vv_u8mf2(vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)444 vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
445   return vand(op1, op2, vl);
446 }
447 
448 // CHECK-RV64-LABEL: @test_vand_vx_u8mf2(
449 // CHECK-RV64-NEXT:  entry:
450 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
451 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
452 //
test_vand_vx_u8mf2(vuint8mf2_t op1,uint8_t op2,size_t vl)453 vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
454   return vand(op1, op2, vl);
455 }
456 
457 // CHECK-RV64-LABEL: @test_vand_vv_u8m1(
458 // CHECK-RV64-NEXT:  entry:
459 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
460 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
461 //
test_vand_vv_u8m1(vuint8m1_t op1,vuint8m1_t op2,size_t vl)462 vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
463   return vand(op1, op2, vl);
464 }
465 
466 // CHECK-RV64-LABEL: @test_vand_vx_u8m1(
467 // CHECK-RV64-NEXT:  entry:
468 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
469 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
470 //
test_vand_vx_u8m1(vuint8m1_t op1,uint8_t op2,size_t vl)471 vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
472   return vand(op1, op2, vl);
473 }
474 
475 // CHECK-RV64-LABEL: @test_vand_vv_u8m2(
476 // CHECK-RV64-NEXT:  entry:
477 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
478 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
479 //
test_vand_vv_u8m2(vuint8m2_t op1,vuint8m2_t op2,size_t vl)480 vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
481   return vand(op1, op2, vl);
482 }
483 
484 // CHECK-RV64-LABEL: @test_vand_vx_u8m2(
485 // CHECK-RV64-NEXT:  entry:
486 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
487 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
488 //
test_vand_vx_u8m2(vuint8m2_t op1,uint8_t op2,size_t vl)489 vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
490   return vand(op1, op2, vl);
491 }
492 
493 // CHECK-RV64-LABEL: @test_vand_vv_u8m4(
494 // CHECK-RV64-NEXT:  entry:
495 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
496 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
497 //
test_vand_vv_u8m4(vuint8m4_t op1,vuint8m4_t op2,size_t vl)498 vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
499   return vand(op1, op2, vl);
500 }
501 
502 // CHECK-RV64-LABEL: @test_vand_vx_u8m4(
503 // CHECK-RV64-NEXT:  entry:
504 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
505 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
506 //
test_vand_vx_u8m4(vuint8m4_t op1,uint8_t op2,size_t vl)507 vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
508   return vand(op1, op2, vl);
509 }
510 
511 // CHECK-RV64-LABEL: @test_vand_vv_u8m8(
512 // CHECK-RV64-NEXT:  entry:
513 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
514 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
515 //
test_vand_vv_u8m8(vuint8m8_t op1,vuint8m8_t op2,size_t vl)516 vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
517   return vand(op1, op2, vl);
518 }
519 
520 // CHECK-RV64-LABEL: @test_vand_vx_u8m8(
521 // CHECK-RV64-NEXT:  entry:
522 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
523 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
524 //
test_vand_vx_u8m8(vuint8m8_t op1,uint8_t op2,size_t vl)525 vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
526   return vand(op1, op2, vl);
527 }
528 
529 // CHECK-RV64-LABEL: @test_vand_vv_u16mf4(
530 // CHECK-RV64-NEXT:  entry:
531 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
532 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
533 //
test_vand_vv_u16mf4(vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)534 vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
535   return vand(op1, op2, vl);
536 }
537 
538 // CHECK-RV64-LABEL: @test_vand_vx_u16mf4(
539 // CHECK-RV64-NEXT:  entry:
540 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
541 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
542 //
test_vand_vx_u16mf4(vuint16mf4_t op1,uint16_t op2,size_t vl)543 vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
544   return vand(op1, op2, vl);
545 }
546 
547 // CHECK-RV64-LABEL: @test_vand_vv_u16mf2(
548 // CHECK-RV64-NEXT:  entry:
549 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
550 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
551 //
test_vand_vv_u16mf2(vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)552 vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
553   return vand(op1, op2, vl);
554 }
555 
556 // CHECK-RV64-LABEL: @test_vand_vx_u16mf2(
557 // CHECK-RV64-NEXT:  entry:
558 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
559 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
560 //
test_vand_vx_u16mf2(vuint16mf2_t op1,uint16_t op2,size_t vl)561 vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
562   return vand(op1, op2, vl);
563 }
564 
565 // CHECK-RV64-LABEL: @test_vand_vv_u16m1(
566 // CHECK-RV64-NEXT:  entry:
567 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
568 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
569 //
test_vand_vv_u16m1(vuint16m1_t op1,vuint16m1_t op2,size_t vl)570 vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
571   return vand(op1, op2, vl);
572 }
573 
574 // CHECK-RV64-LABEL: @test_vand_vx_u16m1(
575 // CHECK-RV64-NEXT:  entry:
576 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
577 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
578 //
test_vand_vx_u16m1(vuint16m1_t op1,uint16_t op2,size_t vl)579 vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
580   return vand(op1, op2, vl);
581 }
582 
583 // CHECK-RV64-LABEL: @test_vand_vv_u16m2(
584 // CHECK-RV64-NEXT:  entry:
585 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
586 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
587 //
test_vand_vv_u16m2(vuint16m2_t op1,vuint16m2_t op2,size_t vl)588 vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
589   return vand(op1, op2, vl);
590 }
591 
592 // CHECK-RV64-LABEL: @test_vand_vx_u16m2(
593 // CHECK-RV64-NEXT:  entry:
594 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
595 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
596 //
test_vand_vx_u16m2(vuint16m2_t op1,uint16_t op2,size_t vl)597 vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
598   return vand(op1, op2, vl);
599 }
600 
601 // CHECK-RV64-LABEL: @test_vand_vv_u16m4(
602 // CHECK-RV64-NEXT:  entry:
603 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
604 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
605 //
test_vand_vv_u16m4(vuint16m4_t op1,vuint16m4_t op2,size_t vl)606 vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
607   return vand(op1, op2, vl);
608 }
609 
610 // CHECK-RV64-LABEL: @test_vand_vx_u16m4(
611 // CHECK-RV64-NEXT:  entry:
612 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
613 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
614 //
test_vand_vx_u16m4(vuint16m4_t op1,uint16_t op2,size_t vl)615 vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
616   return vand(op1, op2, vl);
617 }
618 
619 // CHECK-RV64-LABEL: @test_vand_vv_u16m8(
620 // CHECK-RV64-NEXT:  entry:
621 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
622 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
623 //
test_vand_vv_u16m8(vuint16m8_t op1,vuint16m8_t op2,size_t vl)624 vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
625   return vand(op1, op2, vl);
626 }
627 
628 // CHECK-RV64-LABEL: @test_vand_vx_u16m8(
629 // CHECK-RV64-NEXT:  entry:
630 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
631 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
632 //
test_vand_vx_u16m8(vuint16m8_t op1,uint16_t op2,size_t vl)633 vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
634   return vand(op1, op2, vl);
635 }
636 
637 // CHECK-RV64-LABEL: @test_vand_vv_u32mf2(
638 // CHECK-RV64-NEXT:  entry:
639 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
640 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
641 //
test_vand_vv_u32mf2(vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)642 vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
643   return vand(op1, op2, vl);
644 }
645 
646 // CHECK-RV64-LABEL: @test_vand_vx_u32mf2(
647 // CHECK-RV64-NEXT:  entry:
648 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
649 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
650 //
test_vand_vx_u32mf2(vuint32mf2_t op1,uint32_t op2,size_t vl)651 vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
652   return vand(op1, op2, vl);
653 }
654 
655 // CHECK-RV64-LABEL: @test_vand_vv_u32m1(
656 // CHECK-RV64-NEXT:  entry:
657 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
658 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
659 //
test_vand_vv_u32m1(vuint32m1_t op1,vuint32m1_t op2,size_t vl)660 vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
661   return vand(op1, op2, vl);
662 }
663 
664 // CHECK-RV64-LABEL: @test_vand_vx_u32m1(
665 // CHECK-RV64-NEXT:  entry:
666 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
667 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
668 //
test_vand_vx_u32m1(vuint32m1_t op1,uint32_t op2,size_t vl)669 vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
670   return vand(op1, op2, vl);
671 }
672 
673 // CHECK-RV64-LABEL: @test_vand_vv_u32m2(
674 // CHECK-RV64-NEXT:  entry:
675 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
676 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
677 //
test_vand_vv_u32m2(vuint32m2_t op1,vuint32m2_t op2,size_t vl)678 vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
679   return vand(op1, op2, vl);
680 }
681 
682 // CHECK-RV64-LABEL: @test_vand_vx_u32m2(
683 // CHECK-RV64-NEXT:  entry:
684 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
685 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
686 //
test_vand_vx_u32m2(vuint32m2_t op1,uint32_t op2,size_t vl)687 vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
688   return vand(op1, op2, vl);
689 }
690 
691 // CHECK-RV64-LABEL: @test_vand_vv_u32m4(
692 // CHECK-RV64-NEXT:  entry:
693 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
694 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
695 //
test_vand_vv_u32m4(vuint32m4_t op1,vuint32m4_t op2,size_t vl)696 vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
697   return vand(op1, op2, vl);
698 }
699 
700 // CHECK-RV64-LABEL: @test_vand_vx_u32m4(
701 // CHECK-RV64-NEXT:  entry:
702 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
703 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
704 //
test_vand_vx_u32m4(vuint32m4_t op1,uint32_t op2,size_t vl)705 vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
706   return vand(op1, op2, vl);
707 }
708 
709 // CHECK-RV64-LABEL: @test_vand_vv_u32m8(
710 // CHECK-RV64-NEXT:  entry:
711 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
712 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
713 //
test_vand_vv_u32m8(vuint32m8_t op1,vuint32m8_t op2,size_t vl)714 vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
715   return vand(op1, op2, vl);
716 }
717 
718 // CHECK-RV64-LABEL: @test_vand_vx_u32m8(
719 // CHECK-RV64-NEXT:  entry:
720 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
721 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
722 //
test_vand_vx_u32m8(vuint32m8_t op1,uint32_t op2,size_t vl)723 vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
724   return vand(op1, op2, vl);
725 }
726 
727 // CHECK-RV64-LABEL: @test_vand_vv_u64m1(
728 // CHECK-RV64-NEXT:  entry:
729 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
730 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
731 //
test_vand_vv_u64m1(vuint64m1_t op1,vuint64m1_t op2,size_t vl)732 vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
733   return vand(op1, op2, vl);
734 }
735 
736 // CHECK-RV64-LABEL: @test_vand_vx_u64m1(
737 // CHECK-RV64-NEXT:  entry:
738 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
739 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
740 //
test_vand_vx_u64m1(vuint64m1_t op1,uint64_t op2,size_t vl)741 vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
742   return vand(op1, op2, vl);
743 }
744 
745 // CHECK-RV64-LABEL: @test_vand_vv_u64m2(
746 // CHECK-RV64-NEXT:  entry:
747 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
748 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
749 //
test_vand_vv_u64m2(vuint64m2_t op1,vuint64m2_t op2,size_t vl)750 vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
751   return vand(op1, op2, vl);
752 }
753 
754 // CHECK-RV64-LABEL: @test_vand_vx_u64m2(
755 // CHECK-RV64-NEXT:  entry:
756 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
757 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
758 //
test_vand_vx_u64m2(vuint64m2_t op1,uint64_t op2,size_t vl)759 vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
760   return vand(op1, op2, vl);
761 }
762 
763 // CHECK-RV64-LABEL: @test_vand_vv_u64m4(
764 // CHECK-RV64-NEXT:  entry:
765 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
766 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
767 //
test_vand_vv_u64m4(vuint64m4_t op1,vuint64m4_t op2,size_t vl)768 vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
769   return vand(op1, op2, vl);
770 }
771 
772 // CHECK-RV64-LABEL: @test_vand_vx_u64m4(
773 // CHECK-RV64-NEXT:  entry:
774 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
775 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
776 //
test_vand_vx_u64m4(vuint64m4_t op1,uint64_t op2,size_t vl)777 vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
778   return vand(op1, op2, vl);
779 }
780 
781 // CHECK-RV64-LABEL: @test_vand_vv_u64m8(
782 // CHECK-RV64-NEXT:  entry:
783 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
784 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
785 //
test_vand_vv_u64m8(vuint64m8_t op1,vuint64m8_t op2,size_t vl)786 vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
787   return vand(op1, op2, vl);
788 }
789 
790 // CHECK-RV64-LABEL: @test_vand_vx_u64m8(
791 // CHECK-RV64-NEXT:  entry:
792 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
793 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
794 //
test_vand_vx_u64m8(vuint64m8_t op1,uint64_t op2,size_t vl)795 vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
796   return vand(op1, op2, vl);
797 }
798 
799 // CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m(
800 // CHECK-RV64-NEXT:  entry:
801 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
802 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
803 //
test_vand_vv_i8mf8_m(vbool64_t mask,vint8mf8_t maskedoff,vint8mf8_t op1,vint8mf8_t op2,size_t vl)804 vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
805   return vand(mask, maskedoff, op1, op2, vl);
806 }
807 
808 // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m(
809 // CHECK-RV64-NEXT:  entry:
810 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
811 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
812 //
test_vand_vx_i8mf8_m(vbool64_t mask,vint8mf8_t maskedoff,vint8mf8_t op1,int8_t op2,size_t vl)813 vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
814   return vand(mask, maskedoff, op1, op2, vl);
815 }
816 
817 // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m(
818 // CHECK-RV64-NEXT:  entry:
819 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
820 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
821 //
test_vand_vv_i8mf4_m(vbool32_t mask,vint8mf4_t maskedoff,vint8mf4_t op1,vint8mf4_t op2,size_t vl)822 vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
823   return vand(mask, maskedoff, op1, op2, vl);
824 }
825 
826 // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m(
827 // CHECK-RV64-NEXT:  entry:
828 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
829 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
830 //
test_vand_vx_i8mf4_m(vbool32_t mask,vint8mf4_t maskedoff,vint8mf4_t op1,int8_t op2,size_t vl)831 vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
832   return vand(mask, maskedoff, op1, op2, vl);
833 }
834 
835 // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m(
836 // CHECK-RV64-NEXT:  entry:
837 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
838 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
839 //
test_vand_vv_i8mf2_m(vbool16_t mask,vint8mf2_t maskedoff,vint8mf2_t op1,vint8mf2_t op2,size_t vl)840 vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
841   return vand(mask, maskedoff, op1, op2, vl);
842 }
843 
844 // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m(
845 // CHECK-RV64-NEXT:  entry:
846 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
847 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
848 //
test_vand_vx_i8mf2_m(vbool16_t mask,vint8mf2_t maskedoff,vint8mf2_t op1,int8_t op2,size_t vl)849 vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
850   return vand(mask, maskedoff, op1, op2, vl);
851 }
852 
853 // CHECK-RV64-LABEL: @test_vand_vv_i8m1_m(
854 // CHECK-RV64-NEXT:  entry:
855 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
856 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
857 //
test_vand_vv_i8m1_m(vbool8_t mask,vint8m1_t maskedoff,vint8m1_t op1,vint8m1_t op2,size_t vl)858 vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
859   return vand(mask, maskedoff, op1, op2, vl);
860 }
861 
862 // CHECK-RV64-LABEL: @test_vand_vx_i8m1_m(
863 // CHECK-RV64-NEXT:  entry:
864 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
865 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
866 //
test_vand_vx_i8m1_m(vbool8_t mask,vint8m1_t maskedoff,vint8m1_t op1,int8_t op2,size_t vl)867 vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
868   return vand(mask, maskedoff, op1, op2, vl);
869 }
870 
871 // CHECK-RV64-LABEL: @test_vand_vv_i8m2_m(
872 // CHECK-RV64-NEXT:  entry:
873 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
874 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
875 //
test_vand_vv_i8m2_m(vbool4_t mask,vint8m2_t maskedoff,vint8m2_t op1,vint8m2_t op2,size_t vl)876 vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
877   return vand(mask, maskedoff, op1, op2, vl);
878 }
879 
880 // CHECK-RV64-LABEL: @test_vand_vx_i8m2_m(
881 // CHECK-RV64-NEXT:  entry:
882 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
883 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
884 //
test_vand_vx_i8m2_m(vbool4_t mask,vint8m2_t maskedoff,vint8m2_t op1,int8_t op2,size_t vl)885 vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
886   return vand(mask, maskedoff, op1, op2, vl);
887 }
888 
889 // CHECK-RV64-LABEL: @test_vand_vv_i8m4_m(
890 // CHECK-RV64-NEXT:  entry:
891 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
892 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
893 //
test_vand_vv_i8m4_m(vbool2_t mask,vint8m4_t maskedoff,vint8m4_t op1,vint8m4_t op2,size_t vl)894 vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
895   return vand(mask, maskedoff, op1, op2, vl);
896 }
897 
898 // CHECK-RV64-LABEL: @test_vand_vx_i8m4_m(
899 // CHECK-RV64-NEXT:  entry:
900 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
901 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
902 //
test_vand_vx_i8m4_m(vbool2_t mask,vint8m4_t maskedoff,vint8m4_t op1,int8_t op2,size_t vl)903 vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
904   return vand(mask, maskedoff, op1, op2, vl);
905 }
906 
907 // CHECK-RV64-LABEL: @test_vand_vv_i8m8_m(
908 // CHECK-RV64-NEXT:  entry:
909 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
910 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
911 //
test_vand_vv_i8m8_m(vbool1_t mask,vint8m8_t maskedoff,vint8m8_t op1,vint8m8_t op2,size_t vl)912 vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
913   return vand(mask, maskedoff, op1, op2, vl);
914 }
915 
916 // CHECK-RV64-LABEL: @test_vand_vx_i8m8_m(
917 // CHECK-RV64-NEXT:  entry:
918 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
919 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
920 //
test_vand_vx_i8m8_m(vbool1_t mask,vint8m8_t maskedoff,vint8m8_t op1,int8_t op2,size_t vl)921 vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
922   return vand(mask, maskedoff, op1, op2, vl);
923 }
924 
925 // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m(
926 // CHECK-RV64-NEXT:  entry:
927 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
928 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
929 //
test_vand_vv_i16mf4_m(vbool64_t mask,vint16mf4_t maskedoff,vint16mf4_t op1,vint16mf4_t op2,size_t vl)930 vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
931   return vand(mask, maskedoff, op1, op2, vl);
932 }
933 
934 // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m(
935 // CHECK-RV64-NEXT:  entry:
936 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
937 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
938 //
test_vand_vx_i16mf4_m(vbool64_t mask,vint16mf4_t maskedoff,vint16mf4_t op1,int16_t op2,size_t vl)939 vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
940   return vand(mask, maskedoff, op1, op2, vl);
941 }
942 
943 // CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m(
944 // CHECK-RV64-NEXT:  entry:
945 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
946 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
947 //
test_vand_vv_i16mf2_m(vbool32_t mask,vint16mf2_t maskedoff,vint16mf2_t op1,vint16mf2_t op2,size_t vl)948 vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
949   return vand(mask, maskedoff, op1, op2, vl);
950 }
951 
952 // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m(
953 // CHECK-RV64-NEXT:  entry:
954 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
955 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
956 //
test_vand_vx_i16mf2_m(vbool32_t mask,vint16mf2_t maskedoff,vint16mf2_t op1,int16_t op2,size_t vl)957 vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
958   return vand(mask, maskedoff, op1, op2, vl);
959 }
960 
961 // CHECK-RV64-LABEL: @test_vand_vv_i16m1_m(
962 // CHECK-RV64-NEXT:  entry:
963 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
964 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
965 //
test_vand_vv_i16m1_m(vbool16_t mask,vint16m1_t maskedoff,vint16m1_t op1,vint16m1_t op2,size_t vl)966 vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
967   return vand(mask, maskedoff, op1, op2, vl);
968 }
969 
970 // CHECK-RV64-LABEL: @test_vand_vx_i16m1_m(
971 // CHECK-RV64-NEXT:  entry:
972 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
973 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
974 //
975 vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
976   return vand(mask, maskedoff, op1, op2, vl);
977 }
978 
979 // CHECK-RV64-LABEL: @test_vand_vv_i16m2_m(
980 // CHECK-RV64-NEXT:  entry:
981 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
982 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
983 //
984 vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
985   return vand(mask, maskedoff, op1, op2, vl);
986 }
987 
988 // CHECK-RV64-LABEL: @test_vand_vx_i16m2_m(
989 // CHECK-RV64-NEXT:  entry:
990 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
991 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
992 //
993 vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
994   return vand(mask, maskedoff, op1, op2, vl);
995 }
996 
997 // CHECK-RV64-LABEL: @test_vand_vv_i16m4_m(
998 // CHECK-RV64-NEXT:  entry:
999 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1000 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1001 //
1002 vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
1003   return vand(mask, maskedoff, op1, op2, vl);
1004 }
1005 
1006 // CHECK-RV64-LABEL: @test_vand_vx_i16m4_m(
1007 // CHECK-RV64-NEXT:  entry:
1008 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1009 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1010 //
1011 vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
1012   return vand(mask, maskedoff, op1, op2, vl);
1013 }
1014 
1015 // CHECK-RV64-LABEL: @test_vand_vv_i16m8_m(
1016 // CHECK-RV64-NEXT:  entry:
1017 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1018 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1019 //
1020 vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
1021   return vand(mask, maskedoff, op1, op2, vl);
1022 }
1023 
1024 // CHECK-RV64-LABEL: @test_vand_vx_i16m8_m(
1025 // CHECK-RV64-NEXT:  entry:
1026 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1027 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1028 //
1029 vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
1030   return vand(mask, maskedoff, op1, op2, vl);
1031 }
1032 
1033 // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m(
1034 // CHECK-RV64-NEXT:  entry:
1035 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1036 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1037 //
1038 vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
1039   return vand(mask, maskedoff, op1, op2, vl);
1040 }
1041 
1042 // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m(
1043 // CHECK-RV64-NEXT:  entry:
1044 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1045 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1046 //
1047 vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
1048   return vand(mask, maskedoff, op1, op2, vl);
1049 }
1050 
1051 // CHECK-RV64-LABEL: @test_vand_vv_i32m1_m(
1052 // CHECK-RV64-NEXT:  entry:
1053 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1054 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1055 //
1056 vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
1057   return vand(mask, maskedoff, op1, op2, vl);
1058 }
1059 
1060 // CHECK-RV64-LABEL: @test_vand_vx_i32m1_m(
1061 // CHECK-RV64-NEXT:  entry:
1062 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1063 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1064 //
1065 vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
1066   return vand(mask, maskedoff, op1, op2, vl);
1067 }
1068 
1069 // CHECK-RV64-LABEL: @test_vand_vv_i32m2_m(
1070 // CHECK-RV64-NEXT:  entry:
1071 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1072 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1073 //
1074 vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
1075   return vand(mask, maskedoff, op1, op2, vl);
1076 }
1077 
1078 // CHECK-RV64-LABEL: @test_vand_vx_i32m2_m(
1079 // CHECK-RV64-NEXT:  entry:
1080 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1081 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1082 //
1083 vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
1084   return vand(mask, maskedoff, op1, op2, vl);
1085 }
1086 
1087 // CHECK-RV64-LABEL: @test_vand_vv_i32m4_m(
1088 // CHECK-RV64-NEXT:  entry:
1089 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1090 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1091 //
1092 vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
1093   return vand(mask, maskedoff, op1, op2, vl);
1094 }
1095 
1096 // CHECK-RV64-LABEL: @test_vand_vx_i32m4_m(
1097 // CHECK-RV64-NEXT:  entry:
1098 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1099 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1100 //
1101 vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
1102   return vand(mask, maskedoff, op1, op2, vl);
1103 }
1104 
1105 // CHECK-RV64-LABEL: @test_vand_vv_i32m8_m(
1106 // CHECK-RV64-NEXT:  entry:
1107 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1108 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1109 //
1110 vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
1111   return vand(mask, maskedoff, op1, op2, vl);
1112 }
1113 
1114 // CHECK-RV64-LABEL: @test_vand_vx_i32m8_m(
1115 // CHECK-RV64-NEXT:  entry:
1116 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1117 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1118 //
1119 vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
1120   return vand(mask, maskedoff, op1, op2, vl);
1121 }
1122 
1123 // CHECK-RV64-LABEL: @test_vand_vv_i64m1_m(
1124 // CHECK-RV64-NEXT:  entry:
1125 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1126 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1127 //
1128 vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
1129   return vand(mask, maskedoff, op1, op2, vl);
1130 }
1131 
1132 // CHECK-RV64-LABEL: @test_vand_vx_i64m1_m(
1133 // CHECK-RV64-NEXT:  entry:
1134 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1135 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1136 //
1137 vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
1138   return vand(mask, maskedoff, op1, op2, vl);
1139 }
1140 
1141 // CHECK-RV64-LABEL: @test_vand_vv_i64m2_m(
1142 // CHECK-RV64-NEXT:  entry:
1143 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1144 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1145 //
1146 vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
1147   return vand(mask, maskedoff, op1, op2, vl);
1148 }
1149 
1150 // CHECK-RV64-LABEL: @test_vand_vx_i64m2_m(
1151 // CHECK-RV64-NEXT:  entry:
1152 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1153 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1154 //
1155 vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
1156   return vand(mask, maskedoff, op1, op2, vl);
1157 }
1158 
1159 // CHECK-RV64-LABEL: @test_vand_vv_i64m4_m(
1160 // CHECK-RV64-NEXT:  entry:
1161 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1162 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1163 //
1164 vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
1165   return vand(mask, maskedoff, op1, op2, vl);
1166 }
1167 
1168 // CHECK-RV64-LABEL: @test_vand_vx_i64m4_m(
1169 // CHECK-RV64-NEXT:  entry:
1170 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1171 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1172 //
1173 vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
1174   return vand(mask, maskedoff, op1, op2, vl);
1175 }
1176 
1177 // CHECK-RV64-LABEL: @test_vand_vv_i64m8_m(
1178 // CHECK-RV64-NEXT:  entry:
1179 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1180 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1181 //
1182 vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
1183   return vand(mask, maskedoff, op1, op2, vl);
1184 }
1185 
1186 // CHECK-RV64-LABEL: @test_vand_vx_i64m8_m(
1187 // CHECK-RV64-NEXT:  entry:
1188 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1189 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1190 //
1191 vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
1192   return vand(mask, maskedoff, op1, op2, vl);
1193 }
1194 
1195 // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m(
1196 // CHECK-RV64-NEXT:  entry:
1197 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1198 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1199 //
1200 vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
1201   return vand(mask, maskedoff, op1, op2, vl);
1202 }
1203 
1204 // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m(
1205 // CHECK-RV64-NEXT:  entry:
1206 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1207 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1208 //
1209 vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
1210   return vand(mask, maskedoff, op1, op2, vl);
1211 }
1212 
1213 // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m(
1214 // CHECK-RV64-NEXT:  entry:
1215 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1216 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1217 //
1218 vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
1219   return vand(mask, maskedoff, op1, op2, vl);
1220 }
1221 
1222 // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m(
1223 // CHECK-RV64-NEXT:  entry:
1224 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1225 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1226 //
1227 vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
1228   return vand(mask, maskedoff, op1, op2, vl);
1229 }
1230 
1231 // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m(
1232 // CHECK-RV64-NEXT:  entry:
1233 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1234 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1235 //
1236 vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
1237   return vand(mask, maskedoff, op1, op2, vl);
1238 }
1239 
1240 // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m(
1241 // CHECK-RV64-NEXT:  entry:
1242 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1243 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1244 //
1245 vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
1246   return vand(mask, maskedoff, op1, op2, vl);
1247 }
1248 
1249 // CHECK-RV64-LABEL: @test_vand_vv_u8m1_m(
1250 // CHECK-RV64-NEXT:  entry:
1251 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1252 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1253 //
1254 vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
1255   return vand(mask, maskedoff, op1, op2, vl);
1256 }
1257 
1258 // CHECK-RV64-LABEL: @test_vand_vx_u8m1_m(
1259 // CHECK-RV64-NEXT:  entry:
1260 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1261 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1262 //
1263 vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
1264   return vand(mask, maskedoff, op1, op2, vl);
1265 }
1266 
1267 // CHECK-RV64-LABEL: @test_vand_vv_u8m2_m(
1268 // CHECK-RV64-NEXT:  entry:
1269 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1270 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1271 //
1272 vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
1273   return vand(mask, maskedoff, op1, op2, vl);
1274 }
1275 
1276 // CHECK-RV64-LABEL: @test_vand_vx_u8m2_m(
1277 // CHECK-RV64-NEXT:  entry:
1278 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1279 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1280 //
1281 vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
1282   return vand(mask, maskedoff, op1, op2, vl);
1283 }
1284 
1285 // CHECK-RV64-LABEL: @test_vand_vv_u8m4_m(
1286 // CHECK-RV64-NEXT:  entry:
1287 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1288 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1289 //
1290 vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
1291   return vand(mask, maskedoff, op1, op2, vl);
1292 }
1293 
1294 // CHECK-RV64-LABEL: @test_vand_vx_u8m4_m(
1295 // CHECK-RV64-NEXT:  entry:
1296 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1297 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1298 //
1299 vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
1300   return vand(mask, maskedoff, op1, op2, vl);
1301 }
1302 
1303 // CHECK-RV64-LABEL: @test_vand_vv_u8m8_m(
1304 // CHECK-RV64-NEXT:  entry:
1305 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1306 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1307 //
1308 vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
1309   return vand(mask, maskedoff, op1, op2, vl);
1310 }
1311 
1312 // CHECK-RV64-LABEL: @test_vand_vx_u8m8_m(
1313 // CHECK-RV64-NEXT:  entry:
1314 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1315 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1316 //
1317 vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
1318   return vand(mask, maskedoff, op1, op2, vl);
1319 }
1320 
1321 // CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m(
1322 // CHECK-RV64-NEXT:  entry:
1323 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1324 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1325 //
1326 vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
1327   return vand(mask, maskedoff, op1, op2, vl);
1328 }
1329 
1330 // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m(
1331 // CHECK-RV64-NEXT:  entry:
1332 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1333 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1334 //
1335 vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
1336   return vand(mask, maskedoff, op1, op2, vl);
1337 }
1338 
1339 // CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m(
1340 // CHECK-RV64-NEXT:  entry:
1341 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1342 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1343 //
1344 vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
1345   return vand(mask, maskedoff, op1, op2, vl);
1346 }
1347 
1348 // CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m(
1349 // CHECK-RV64-NEXT:  entry:
1350 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1351 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1352 //
1353 vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
1354   return vand(mask, maskedoff, op1, op2, vl);
1355 }
1356 
1357 // CHECK-RV64-LABEL: @test_vand_vv_u16m1_m(
1358 // CHECK-RV64-NEXT:  entry:
1359 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1360 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1361 //
1362 vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
1363   return vand(mask, maskedoff, op1, op2, vl);
1364 }
1365 
1366 // CHECK-RV64-LABEL: @test_vand_vx_u16m1_m(
1367 // CHECK-RV64-NEXT:  entry:
1368 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1369 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1370 //
1371 vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
1372   return vand(mask, maskedoff, op1, op2, vl);
1373 }
1374 
1375 // CHECK-RV64-LABEL: @test_vand_vv_u16m2_m(
1376 // CHECK-RV64-NEXT:  entry:
1377 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1378 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1379 //
1380 vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
1381   return vand(mask, maskedoff, op1, op2, vl);
1382 }
1383 
1384 // CHECK-RV64-LABEL: @test_vand_vx_u16m2_m(
1385 // CHECK-RV64-NEXT:  entry:
1386 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1387 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1388 //
1389 vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
1390   return vand(mask, maskedoff, op1, op2, vl);
1391 }
1392 
1393 // CHECK-RV64-LABEL: @test_vand_vv_u16m4_m(
1394 // CHECK-RV64-NEXT:  entry:
1395 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1396 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1397 //
1398 vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
1399   return vand(mask, maskedoff, op1, op2, vl);
1400 }
1401 
1402 // CHECK-RV64-LABEL: @test_vand_vx_u16m4_m(
1403 // CHECK-RV64-NEXT:  entry:
1404 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1405 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1406 //
1407 vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
1408   return vand(mask, maskedoff, op1, op2, vl);
1409 }
1410 
1411 // CHECK-RV64-LABEL: @test_vand_vv_u16m8_m(
1412 // CHECK-RV64-NEXT:  entry:
1413 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1414 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1415 //
1416 vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
1417   return vand(mask, maskedoff, op1, op2, vl);
1418 }
1419 
1420 // CHECK-RV64-LABEL: @test_vand_vx_u16m8_m(
1421 // CHECK-RV64-NEXT:  entry:
1422 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1423 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1424 //
1425 vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
1426   return vand(mask, maskedoff, op1, op2, vl);
1427 }
1428 
1429 // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m(
1430 // CHECK-RV64-NEXT:  entry:
1431 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1432 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1433 //
1434 vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
1435   return vand(mask, maskedoff, op1, op2, vl);
1436 }
1437 
1438 // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m(
1439 // CHECK-RV64-NEXT:  entry:
1440 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1441 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1442 //
1443 vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
1444   return vand(mask, maskedoff, op1, op2, vl);
1445 }
1446 
1447 // CHECK-RV64-LABEL: @test_vand_vv_u32m1_m(
1448 // CHECK-RV64-NEXT:  entry:
1449 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1450 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1451 //
1452 vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
1453   return vand(mask, maskedoff, op1, op2, vl);
1454 }
1455 
1456 // CHECK-RV64-LABEL: @test_vand_vx_u32m1_m(
1457 // CHECK-RV64-NEXT:  entry:
1458 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1459 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1460 //
1461 vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
1462   return vand(mask, maskedoff, op1, op2, vl);
1463 }
1464 
1465 // CHECK-RV64-LABEL: @test_vand_vv_u32m2_m(
1466 // CHECK-RV64-NEXT:  entry:
1467 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1468 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1469 //
1470 vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
1471   return vand(mask, maskedoff, op1, op2, vl);
1472 }
1473 
1474 // CHECK-RV64-LABEL: @test_vand_vx_u32m2_m(
1475 // CHECK-RV64-NEXT:  entry:
1476 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1477 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1478 //
1479 vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
1480   return vand(mask, maskedoff, op1, op2, vl);
1481 }
1482 
1483 // CHECK-RV64-LABEL: @test_vand_vv_u32m4_m(
1484 // CHECK-RV64-NEXT:  entry:
1485 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1486 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1487 //
1488 vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
1489   return vand(mask, maskedoff, op1, op2, vl);
1490 }
1491 
1492 // CHECK-RV64-LABEL: @test_vand_vx_u32m4_m(
1493 // CHECK-RV64-NEXT:  entry:
1494 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1495 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1496 //
1497 vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
1498   return vand(mask, maskedoff, op1, op2, vl);
1499 }
1500 
1501 // CHECK-RV64-LABEL: @test_vand_vv_u32m8_m(
1502 // CHECK-RV64-NEXT:  entry:
1503 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1504 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1505 //
1506 vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
1507   return vand(mask, maskedoff, op1, op2, vl);
1508 }
1509 
1510 // CHECK-RV64-LABEL: @test_vand_vx_u32m8_m(
1511 // CHECK-RV64-NEXT:  entry:
1512 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1513 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1514 //
1515 vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
1516   return vand(mask, maskedoff, op1, op2, vl);
1517 }
1518 
1519 // CHECK-RV64-LABEL: @test_vand_vv_u64m1_m(
1520 // CHECK-RV64-NEXT:  entry:
1521 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1522 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1523 //
1524 vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
1525   return vand(mask, maskedoff, op1, op2, vl);
1526 }
1527 
1528 // CHECK-RV64-LABEL: @test_vand_vx_u64m1_m(
1529 // CHECK-RV64-NEXT:  entry:
1530 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1531 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1532 //
1533 vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
1534   return vand(mask, maskedoff, op1, op2, vl);
1535 }
1536 
1537 // CHECK-RV64-LABEL: @test_vand_vv_u64m2_m(
1538 // CHECK-RV64-NEXT:  entry:
1539 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1540 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1541 //
1542 vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
1543   return vand(mask, maskedoff, op1, op2, vl);
1544 }
1545 
1546 // CHECK-RV64-LABEL: @test_vand_vx_u64m2_m(
1547 // CHECK-RV64-NEXT:  entry:
1548 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1549 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1550 //
1551 vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
1552   return vand(mask, maskedoff, op1, op2, vl);
1553 }
1554 
1555 // CHECK-RV64-LABEL: @test_vand_vv_u64m4_m(
1556 // CHECK-RV64-NEXT:  entry:
1557 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1558 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1559 //
1560 vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
1561   return vand(mask, maskedoff, op1, op2, vl);
1562 }
1563 
1564 // CHECK-RV64-LABEL: @test_vand_vx_u64m4_m(
1565 // CHECK-RV64-NEXT:  entry:
1566 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1567 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1568 //
1569 vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
1570   return vand(mask, maskedoff, op1, op2, vl);
1571 }
1572 
1573 // CHECK-RV64-LABEL: @test_vand_vv_u64m8_m(
1574 // CHECK-RV64-NEXT:  entry:
1575 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1576 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1577 //
1578 vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
1579   return vand(mask, maskedoff, op1, op2, vl);
1580 }
1581 
1582 // CHECK-RV64-LABEL: @test_vand_vx_u64m8_m(
1583 // CHECK-RV64-NEXT:  entry:
1584 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1585 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1586 //
1587 vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
1588   return vand(mask, maskedoff, op1, op2, vl);
1589 }
1590 
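// The *_mt tests below mirror the *_m tests above but additionally pass an
// explicit tail-policy argument (VE_TAIL_AGNOSTIC in every call here); in the
// expected IR this corresponds to the trailing "i64 1" policy operand of the
// masked vand intrinsics. The unused size_t ta parameter only documents the
// extra-argument form of the overload.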
1591 // CHECK-RV64-LABEL: @test_vand_vv_i8mf8_mt(
1592 // CHECK-RV64-NEXT:  entry:
1593 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1594 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1595 //
1596 vint8mf8_t test_vand_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) {
1597   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1598 }
1599 
1600 // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_mt(
1601 // CHECK-RV64-NEXT:  entry:
1602 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1603 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1604 //
1605 vint8mf8_t test_vand_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) {
1606   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1607 }
1608 
1609 // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_mt(
1610 // CHECK-RV64-NEXT:  entry:
1611 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1612 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1613 //
1614 vint8mf4_t test_vand_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) {
1615   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1616 }
1617 
1618 // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_mt(
1619 // CHECK-RV64-NEXT:  entry:
1620 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1621 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1622 //
1623 vint8mf4_t test_vand_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) {
1624   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1625 }
1626 
1627 // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_mt(
1628 // CHECK-RV64-NEXT:  entry:
1629 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1630 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1631 //
1632 vint8mf2_t test_vand_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) {
1633   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1634 }
1635 
1636 // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_mt(
1637 // CHECK-RV64-NEXT:  entry:
1638 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1639 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1640 //
1641 vint8mf2_t test_vand_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) {
1642   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1643 }
1644 
1645 // CHECK-RV64-LABEL: @test_vand_vv_i8m1_mt(
1646 // CHECK-RV64-NEXT:  entry:
1647 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1648 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1649 //
1650 vint8m1_t test_vand_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) {
1651   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1652 }
1653 
1654 // CHECK-RV64-LABEL: @test_vand_vx_i8m1_mt(
1655 // CHECK-RV64-NEXT:  entry:
1656 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1657 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1658 //
1659 vint8m1_t test_vand_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) {
1660   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1661 }
1662 
1663 // CHECK-RV64-LABEL: @test_vand_vv_i8m2_mt(
1664 // CHECK-RV64-NEXT:  entry:
1665 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1666 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1667 //
1668 vint8m2_t test_vand_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) {
1669   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1670 }
1671 
1672 // CHECK-RV64-LABEL: @test_vand_vx_i8m2_mt(
1673 // CHECK-RV64-NEXT:  entry:
1674 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1675 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1676 //
1677 vint8m2_t test_vand_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) {
1678   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1679 }
1680 
1681 // CHECK-RV64-LABEL: @test_vand_vv_i8m4_mt(
1682 // CHECK-RV64-NEXT:  entry:
1683 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1684 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1685 //
1686 vint8m4_t test_vand_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) {
1687   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1688 }
1689 
1690 // CHECK-RV64-LABEL: @test_vand_vx_i8m4_mt(
1691 // CHECK-RV64-NEXT:  entry:
1692 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1693 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1694 //
1695 vint8m4_t test_vand_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) {
1696   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1697 }
1698 
1699 // CHECK-RV64-LABEL: @test_vand_vv_i8m8_mt(
1700 // CHECK-RV64-NEXT:  entry:
1701 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1702 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1703 //
1704 vint8m8_t test_vand_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, size_t ta) {
1705   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1706 }
1707 
1708 // CHECK-RV64-LABEL: @test_vand_vx_i8m8_mt(
1709 // CHECK-RV64-NEXT:  entry:
1710 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1711 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1712 //
1713 vint8m8_t test_vand_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, size_t ta) {
1714   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1715 }
1716 
1717 // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_mt(
1718 // CHECK-RV64-NEXT:  entry:
1719 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1720 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1721 //
1722 vint16mf4_t test_vand_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) {
1723   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1724 }
1725 
1726 // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_mt(
1727 // CHECK-RV64-NEXT:  entry:
1728 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1729 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1730 //
1731 vint16mf4_t test_vand_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) {
1732   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1733 }
1734 
1735 // CHECK-RV64-LABEL: @test_vand_vv_i16mf2_mt(
1736 // CHECK-RV64-NEXT:  entry:
1737 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1738 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1739 //
1740 vint16mf2_t test_vand_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) {
1741   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1742 }
1743 
1744 // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_mt(
1745 // CHECK-RV64-NEXT:  entry:
1746 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1747 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1748 //
1749 vint16mf2_t test_vand_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) {
1750   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1751 }
1752 
1753 // CHECK-RV64-LABEL: @test_vand_vv_i16m1_mt(
1754 // CHECK-RV64-NEXT:  entry:
1755 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1756 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1757 //
1758 vint16m1_t test_vand_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) {
1759   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1760 }
1761 
1762 // CHECK-RV64-LABEL: @test_vand_vx_i16m1_mt(
1763 // CHECK-RV64-NEXT:  entry:
1764 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1765 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1766 //
1767 vint16m1_t test_vand_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) {
1768   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1769 }
1770 
1771 // CHECK-RV64-LABEL: @test_vand_vv_i16m2_mt(
1772 // CHECK-RV64-NEXT:  entry:
1773 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1774 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1775 //
1776 vint16m2_t test_vand_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) {
1777   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1778 }
1779 
1780 // CHECK-RV64-LABEL: @test_vand_vx_i16m2_mt(
1781 // CHECK-RV64-NEXT:  entry:
1782 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1783 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1784 //
1785 vint16m2_t test_vand_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) {
1786   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1787 }
1788 
1789 // CHECK-RV64-LABEL: @test_vand_vv_i16m4_mt(
1790 // CHECK-RV64-NEXT:  entry:
1791 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1792 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1793 //
1794 vint16m4_t test_vand_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) {
1795   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1796 }
1797 
1798 // CHECK-RV64-LABEL: @test_vand_vx_i16m4_mt(
1799 // CHECK-RV64-NEXT:  entry:
1800 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1801 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1802 //
1803 vint16m4_t test_vand_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) {
1804   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1805 }
1806 
1807 // CHECK-RV64-LABEL: @test_vand_vv_i16m8_mt(
1808 // CHECK-RV64-NEXT:  entry:
1809 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1810 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1811 //
1812 vint16m8_t test_vand_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, size_t ta) {
1813   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1814 }
1815 
1816 // CHECK-RV64-LABEL: @test_vand_vx_i16m8_mt(
1817 // CHECK-RV64-NEXT:  entry:
1818 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1819 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1820 //
1821 vint16m8_t test_vand_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, size_t ta) {
1822   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1823 }
1824 
1825 // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_mt(
1826 // CHECK-RV64-NEXT:  entry:
1827 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1828 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1829 //
1830 vint32mf2_t test_vand_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) {
1831   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1832 }
1833 
1834 // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_mt(
1835 // CHECK-RV64-NEXT:  entry:
1836 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1837 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1838 //
1839 vint32mf2_t test_vand_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) {
1840   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1841 }
1842 
1843 // CHECK-RV64-LABEL: @test_vand_vv_i32m1_mt(
1844 // CHECK-RV64-NEXT:  entry:
1845 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1846 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1847 //
1848 vint32m1_t test_vand_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) {
1849   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1850 }
1851 
1852 // CHECK-RV64-LABEL: @test_vand_vx_i32m1_mt(
1853 // CHECK-RV64-NEXT:  entry:
1854 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1855 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1856 //
1857 vint32m1_t test_vand_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) {
1858   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1859 }
1860 
1861 // CHECK-RV64-LABEL: @test_vand_vv_i32m2_mt(
1862 // CHECK-RV64-NEXT:  entry:
1863 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1864 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1865 //
1866 vint32m2_t test_vand_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) {
1867   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1868 }
1869 
1870 // CHECK-RV64-LABEL: @test_vand_vx_i32m2_mt(
1871 // CHECK-RV64-NEXT:  entry:
1872 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1873 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1874 //
1875 vint32m2_t test_vand_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) {
1876   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1877 }
1878 
1879 // CHECK-RV64-LABEL: @test_vand_vv_i32m4_mt(
1880 // CHECK-RV64-NEXT:  entry:
1881 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1882 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1883 //
1884 vint32m4_t test_vand_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) {
1885   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1886 }
1887 
1888 // CHECK-RV64-LABEL: @test_vand_vx_i32m4_mt(
1889 // CHECK-RV64-NEXT:  entry:
1890 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1891 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1892 //
1893 vint32m4_t test_vand_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) {
1894   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1895 }
1896 
1897 // CHECK-RV64-LABEL: @test_vand_vv_i32m8_mt(
1898 // CHECK-RV64-NEXT:  entry:
1899 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1900 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1901 //
1902 vint32m8_t test_vand_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, size_t ta) {
1903   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1904 }
1905 
1906 // CHECK-RV64-LABEL: @test_vand_vx_i32m8_mt(
1907 // CHECK-RV64-NEXT:  entry:
1908 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1909 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1910 //
1911 vint32m8_t test_vand_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, size_t ta) {
1912   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1913 }
1914 
1915 // CHECK-RV64-LABEL: @test_vand_vv_i64m1_mt(
1916 // CHECK-RV64-NEXT:  entry:
1917 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1918 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1919 //
1920 vint64m1_t test_vand_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, size_t ta) {
1921   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1922 }
1923 
1924 // CHECK-RV64-LABEL: @test_vand_vx_i64m1_mt(
1925 // CHECK-RV64-NEXT:  entry:
1926 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1927 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1928 //
1929 vint64m1_t test_vand_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, size_t ta) {
1930   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1931 }
1932 
1933 // CHECK-RV64-LABEL: @test_vand_vv_i64m2_mt(
1934 // CHECK-RV64-NEXT:  entry:
1935 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1936 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1937 //
1938 vint64m2_t test_vand_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, size_t ta) {
1939   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1940 }
1941 
1942 // CHECK-RV64-LABEL: @test_vand_vx_i64m2_mt(
1943 // CHECK-RV64-NEXT:  entry:
1944 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1945 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1946 //
1947 vint64m2_t test_vand_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, size_t ta) {
1948   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1949 }
1950 
1951 // CHECK-RV64-LABEL: @test_vand_vv_i64m4_mt(
1952 // CHECK-RV64-NEXT:  entry:
1953 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1954 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1955 //
1956 vint64m4_t test_vand_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, size_t ta) {
1957   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1958 }
1959 
1960 // CHECK-RV64-LABEL: @test_vand_vx_i64m4_mt(
1961 // CHECK-RV64-NEXT:  entry:
1962 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1963 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1964 //
1965 vint64m4_t test_vand_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, size_t ta) {
1966   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1967 }
1968 
1969 // CHECK-RV64-LABEL: @test_vand_vv_i64m8_mt(
1970 // CHECK-RV64-NEXT:  entry:
1971 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1972 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1973 //
1974 vint64m8_t test_vand_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, size_t ta) {
1975   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1976 }
1977 
1978 // CHECK-RV64-LABEL: @test_vand_vx_i64m8_mt(
1979 // CHECK-RV64-NEXT:  entry:
1980 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1981 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1982 //
1983 vint64m8_t test_vand_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, size_t ta) {
1984   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1985 }
1986 
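// Unsigned element types follow. They use the same overloaded vand form and lower
// to the same @llvm.riscv.vand.mask.* intrinsics as the signed tests above.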
1987 // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_mt(
1988 // CHECK-RV64-NEXT:  entry:
1989 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1990 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1991 //
1992 vuint8mf8_t test_vand_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) {
1993   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
1994 }
1995 
1996 // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_mt(
1997 // CHECK-RV64-NEXT:  entry:
1998 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1999 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
2000 //
2001 vuint8mf8_t test_vand_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) {
2002   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2003 }
2004 
2005 // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_mt(
2006 // CHECK-RV64-NEXT:  entry:
2007 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2008 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
2009 //
2010 vuint8mf4_t test_vand_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) {
2011   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2012 }
2013 
2014 // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_mt(
2015 // CHECK-RV64-NEXT:  entry:
2016 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2017 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
2018 //
2019 vuint8mf4_t test_vand_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) {
2020   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2021 }
2022 
2023 // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_mt(
2024 // CHECK-RV64-NEXT:  entry:
2025 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2026 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
2027 //
2028 vuint8mf2_t test_vand_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) {
2029   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2030 }
2031 
2032 // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_mt(
2033 // CHECK-RV64-NEXT:  entry:
2034 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2035 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
2036 //
2037 vuint8mf2_t test_vand_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) {
2038   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2039 }
2040 
2041 // CHECK-RV64-LABEL: @test_vand_vv_u8m1_mt(
2042 // CHECK-RV64-NEXT:  entry:
2043 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2044 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
2045 //
2046 vuint8m1_t test_vand_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) {
2047   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2048 }
2049 
2050 // CHECK-RV64-LABEL: @test_vand_vx_u8m1_mt(
2051 // CHECK-RV64-NEXT:  entry:
2052 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2053 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
2054 //
2055 vuint8m1_t test_vand_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) {
2056   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2057 }
2058 
2059 // CHECK-RV64-LABEL: @test_vand_vv_u8m2_mt(
2060 // CHECK-RV64-NEXT:  entry:
2061 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2062 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
2063 //
2064 vuint8m2_t test_vand_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) {
2065   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2066 }
2067 
2068 // CHECK-RV64-LABEL: @test_vand_vx_u8m2_mt(
2069 // CHECK-RV64-NEXT:  entry:
2070 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2071 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
2072 //
2073 vuint8m2_t test_vand_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) {
2074   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2075 }
2076 
2077 // CHECK-RV64-LABEL: @test_vand_vv_u8m4_mt(
2078 // CHECK-RV64-NEXT:  entry:
2079 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2080 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
2081 //
2082 vuint8m4_t test_vand_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) {
2083   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2084 }
2085 
2086 // CHECK-RV64-LABEL: @test_vand_vx_u8m4_mt(
2087 // CHECK-RV64-NEXT:  entry:
2088 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2089 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
2090 //
2091 vuint8m4_t test_vand_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) {
2092   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2093 }
2094 
2095 // CHECK-RV64-LABEL: @test_vand_vv_u8m8_mt(
2096 // CHECK-RV64-NEXT:  entry:
2097 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2098 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
2099 //
2100 vuint8m8_t test_vand_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, size_t ta) {
2101   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2102 }
2103 
2104 // CHECK-RV64-LABEL: @test_vand_vx_u8m8_mt(
2105 // CHECK-RV64-NEXT:  entry:
2106 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2107 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
2108 //
2109 vuint8m8_t test_vand_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, size_t ta) {
2110   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2111 }
2112 
2113 // CHECK-RV64-LABEL: @test_vand_vv_u16mf4_mt(
2114 // CHECK-RV64-NEXT:  entry:
2115 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2116 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
2117 //
2118 vuint16mf4_t test_vand_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) {
2119   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2120 }
2121 
2122 // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_mt(
2123 // CHECK-RV64-NEXT:  entry:
2124 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2125 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
2126 //
2127 vuint16mf4_t test_vand_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) {
2128   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2129 }
2130 
2131 // CHECK-RV64-LABEL: @test_vand_vv_u16mf2_mt(
2132 // CHECK-RV64-NEXT:  entry:
2133 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2134 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
2135 //
2136 vuint16mf2_t test_vand_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) {
2137   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2138 }
2139 
2140 // CHECK-RV64-LABEL: @test_vand_vx_u16mf2_mt(
2141 // CHECK-RV64-NEXT:  entry:
2142 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2143 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
2144 //
2145 vuint16mf2_t test_vand_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) {
2146   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2147 }
2148 
2149 // CHECK-RV64-LABEL: @test_vand_vv_u16m1_mt(
2150 // CHECK-RV64-NEXT:  entry:
2151 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2152 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
2153 //
2154 vuint16m1_t test_vand_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) {
2155   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2156 }
2157 
2158 // CHECK-RV64-LABEL: @test_vand_vx_u16m1_mt(
2159 // CHECK-RV64-NEXT:  entry:
2160 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2161 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
2162 //
2163 vuint16m1_t test_vand_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) {
2164   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2165 }
2166 
2167 // CHECK-RV64-LABEL: @test_vand_vv_u16m2_mt(
2168 // CHECK-RV64-NEXT:  entry:
2169 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2170 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
2171 //
2172 vuint16m2_t test_vand_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) {
2173   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2174 }
2175 
2176 // CHECK-RV64-LABEL: @test_vand_vx_u16m2_mt(
2177 // CHECK-RV64-NEXT:  entry:
2178 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2179 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
2180 //
2181 vuint16m2_t test_vand_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) {
2182   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2183 }
2184 
2185 // CHECK-RV64-LABEL: @test_vand_vv_u16m4_mt(
2186 // CHECK-RV64-NEXT:  entry:
2187 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2188 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
2189 //
2190 vuint16m4_t test_vand_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) {
2191   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2192 }
2193 
2194 // CHECK-RV64-LABEL: @test_vand_vx_u16m4_mt(
2195 // CHECK-RV64-NEXT:  entry:
2196 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2197 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
2198 //
2199 vuint16m4_t test_vand_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) {
2200   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2201 }
2202 
2203 // CHECK-RV64-LABEL: @test_vand_vv_u16m8_mt(
2204 // CHECK-RV64-NEXT:  entry:
2205 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2206 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
2207 //
2208 vuint16m8_t test_vand_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, size_t ta) {
2209   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2210 }
2211 
2212 // CHECK-RV64-LABEL: @test_vand_vx_u16m8_mt(
2213 // CHECK-RV64-NEXT:  entry:
2214 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2215 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
2216 //
2217 vuint16m8_t test_vand_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, size_t ta) {
2218   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2219 }
2220 
2221 // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_mt(
2222 // CHECK-RV64-NEXT:  entry:
2223 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2224 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
2225 //
2226 vuint32mf2_t test_vand_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) {
2227   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2228 }
2229 
2230 // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_mt(
2231 // CHECK-RV64-NEXT:  entry:
2232 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2233 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
2234 //
2235 vuint32mf2_t test_vand_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) {
2236   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2237 }
2238 
2239 // CHECK-RV64-LABEL: @test_vand_vv_u32m1_mt(
2240 // CHECK-RV64-NEXT:  entry:
2241 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2242 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
2243 //
2244 vuint32m1_t test_vand_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) {
2245   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2246 }
2247 
2248 // CHECK-RV64-LABEL: @test_vand_vx_u32m1_mt(
2249 // CHECK-RV64-NEXT:  entry:
2250 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2251 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
2252 //
2253 vuint32m1_t test_vand_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) {
2254   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2255 }
2256 
2257 // CHECK-RV64-LABEL: @test_vand_vv_u32m2_mt(
2258 // CHECK-RV64-NEXT:  entry:
2259 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2260 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
2261 //
2262 vuint32m2_t test_vand_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) {
2263   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2264 }
2265 
2266 // CHECK-RV64-LABEL: @test_vand_vx_u32m2_mt(
2267 // CHECK-RV64-NEXT:  entry:
2268 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2269 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
2270 //
2271 vuint32m2_t test_vand_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) {
2272   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2273 }
2274 
2275 // CHECK-RV64-LABEL: @test_vand_vv_u32m4_mt(
2276 // CHECK-RV64-NEXT:  entry:
2277 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2278 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
2279 //
2280 vuint32m4_t test_vand_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) {
2281   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2282 }
2283 
2284 // CHECK-RV64-LABEL: @test_vand_vx_u32m4_mt(
2285 // CHECK-RV64-NEXT:  entry:
2286 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2287 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
2288 //
2289 vuint32m4_t test_vand_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) {
2290   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2291 }
2292 
2293 // CHECK-RV64-LABEL: @test_vand_vv_u32m8_mt(
2294 // CHECK-RV64-NEXT:  entry:
2295 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2296 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
2297 //
2298 vuint32m8_t test_vand_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) {
2299   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2300 }
2301 
2302 // CHECK-RV64-LABEL: @test_vand_vx_u32m8_mt(
2303 // CHECK-RV64-NEXT:  entry:
2304 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2305 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
2306 //
2307 vuint32m8_t test_vand_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) {
2308   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2309 }
2310 
2311 // CHECK-RV64-LABEL: @test_vand_vv_u64m1_mt(
2312 // CHECK-RV64-NEXT:  entry:
2313 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2314 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
2315 //
2316 vuint64m1_t test_vand_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) {
2317   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2318 }
2319 
2320 // CHECK-RV64-LABEL: @test_vand_vx_u64m1_mt(
2321 // CHECK-RV64-NEXT:  entry:
2322 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2323 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
2324 //
2325 vuint64m1_t test_vand_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) {
2326   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2327 }
2328 
2329 // CHECK-RV64-LABEL: @test_vand_vv_u64m2_mt(
2330 // CHECK-RV64-NEXT:  entry:
2331 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2332 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
2333 //
2334 vuint64m2_t test_vand_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) {
2335   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2336 }
2337 
2338 // CHECK-RV64-LABEL: @test_vand_vx_u64m2_mt(
2339 // CHECK-RV64-NEXT:  entry:
2340 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2341 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
2342 //
2343 vuint64m2_t test_vand_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) {
2344   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2345 }
2346 
2347 // CHECK-RV64-LABEL: @test_vand_vv_u64m4_mt(
2348 // CHECK-RV64-NEXT:  entry:
2349 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2350 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
2351 //
2352 vuint64m4_t test_vand_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) {
2353   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2354 }
2355 
2356 // CHECK-RV64-LABEL: @test_vand_vx_u64m4_mt(
2357 // CHECK-RV64-NEXT:  entry:
2358 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2359 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
2360 //
2361 vuint64m4_t test_vand_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) {
2362   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2363 }
2364 
2365 // CHECK-RV64-LABEL: @test_vand_vv_u64m8_mt(
2366 // CHECK-RV64-NEXT:  entry:
2367 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2368 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
2369 //
2370 vuint64m8_t test_vand_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) {
2371   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2372 }
2373 
2374 // CHECK-RV64-LABEL: @test_vand_vx_u64m8_mt(
2375 // CHECK-RV64-NEXT:  entry:
2376 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2377 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
2378 //
2379 vuint64m8_t test_vand_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) {
2380   return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2381 }
2382 
2383