// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

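// The intrinsics below follow the RVV naming scheme: vadd_vv_<type> adds two
// vectors elementwise, vadd_vx_<type> adds a scalar to every element, and the
// _m variants later in this file additionally take a mask and a maskedoff
// operand. A minimal usage sketch outside the autogenerated checks (the
// wrapper add_bias is hypothetical, not part of this test):
//
//   vint32m1_t add_bias(vint32m1_t v, int32_t bias, size_t vl) {
//     return vadd_vx_i32m1(v, bias, vl); // v[i] + bias for the first vl elements
//   }
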
//
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vadd_vv_i8mf8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8mf8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vadd_vv_i8mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vadd_vv_i8mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vadd_vv_i8m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
  return vadd_vv_i8m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
  return vadd_vv_i8m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
  return vadd_vv_i8m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
  return vadd_vv_i16mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
  return vadd_vx_i16mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return vadd_vv_i16mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
  return vadd_vx_i16mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return vadd_vv_i16m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
  return vadd_vx_i16m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
  return vadd_vv_i16m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
  return vadd_vx_i16m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
  return vadd_vv_i16m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
  return vadd_vx_i16m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
  return vadd_vv_i16m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
  return vadd_vx_i16m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
  return vadd_vv_i32mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
  return vadd_vx_i32mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vadd_vv_i32m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
  return vadd_vx_i32m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
  return vadd_vv_i32m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
  return vadd_vx_i32m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
  return vadd_vv_i32m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
  return vadd_vx_i32m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
  return vadd_vv_i32m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
  return vadd_vx_i32m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vadd_vv_i64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
  return vadd_vx_i64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return vadd_vv_i64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
  return vadd_vx_i64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return vadd_vv_i64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
  return vadd_vx_i64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return vadd_vv_i64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
  return vadd_vx_i64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
  return vadd_vv_u8mf8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
  return vadd_vx_u8mf8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return vadd_vv_u8mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return vadd_vx_u8mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return vadd_vv_u8mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return vadd_vx_u8mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vadd_vv_u8m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vadd_vx_u8m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vadd_vv_u8m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
  return vadd_vx_u8m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vadd_vv_u8m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
  return vadd_vx_u8m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return vadd_vv_u8m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
  return vadd_vx_u8m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
  return vadd_vv_u16mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
  return vadd_vx_u16mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return vadd_vv_u16mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return vadd_vx_u16mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vadd_vv_u16m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
  return vadd_vx_u16m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vadd_vv_u16m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
  return vadd_vx_u16m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return vadd_vv_u16m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
  return vadd_vx_u16m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return vadd_vv_u16m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
  return vadd_vx_u16m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
  return vadd_vv_u32mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
  return vadd_vx_u32mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vadd_vv_u32m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vadd_vx_u32m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return vadd_vv_u32m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
  return vadd_vx_u32m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return vadd_vv_u32m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
  return vadd_vx_u32m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return vadd_vv_u32m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
  return vadd_vx_u32m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vadd_vv_u64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vadd_vx_u64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vadd_vv_u64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vadd_vx_u64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vadd_vv_u64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vadd_vx_u64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vadd_vv_u64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vadd_vx_u64m8(op1, op2, vl);
}

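// The _m tests below cover the masked forms: lanes where the mask bit is
// clear take their value from maskedoff rather than from the add. A minimal
// sketch of a conditional add using these intrinsics (the wrapper select_add
// is hypothetical, not part of this test):
//
//   vint8m1_t select_add(vbool8_t mask, vint8m1_t maskedoff,
//                        vint8m1_t a, vint8m1_t b, size_t vl) {
//     return vadd_vv_i8m1_m(mask, maskedoff, a, b, vl); // masked-off lanes come from maskedoff
//   }
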
//
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
  return vadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
  return vadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
  return vadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
  return vadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
  return vadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
  return vadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return vadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
  return vadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return vadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
  return vadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
  return vadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
  return vadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
}

//
1109 // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m(
1110 // CHECK-RV64-NEXT:  entry:
1111 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1112 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1113 //
test_vadd_vv_i16m4_m(vbool4_t mask,vint16m4_t maskedoff,vint16m4_t op1,vint16m4_t op2,size_t vl)1114 vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
1115   return vadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
1116 }
1117 
1118 //
1119 // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m(
1120 // CHECK-RV64-NEXT:  entry:
1121 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1122 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1123 //
test_vadd_vx_i16m4_m(vbool4_t mask,vint16m4_t maskedoff,vint16m4_t op1,int16_t op2,size_t vl)1124 vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
1125   return vadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
1126 }
1127 
1128 //
1129 // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m(
1130 // CHECK-RV64-NEXT:  entry:
1131 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1132 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1133 //
test_vadd_vv_i16m8_m(vbool2_t mask,vint16m8_t maskedoff,vint16m8_t op1,vint16m8_t op2,size_t vl)1134 vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
1135   return vadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
1136 }
1137 
1138 //
1139 // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m(
1140 // CHECK-RV64-NEXT:  entry:
1141 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1142 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1143 //
test_vadd_vx_i16m8_m(vbool2_t mask,vint16m8_t maskedoff,vint16m8_t op1,int16_t op2,size_t vl)1144 vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
1145   return vadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
1146 }
1147 
1148 //
1149 // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m(
1150 // CHECK-RV64-NEXT:  entry:
1151 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1152 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1153 //
test_vadd_vv_i32mf2_m(vbool64_t mask,vint32mf2_t maskedoff,vint32mf2_t op1,vint32mf2_t op2,size_t vl)1154 vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
1155   return vadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
1156 }
1157 
1158 //
1159 // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m(
1160 // CHECK-RV64-NEXT:  entry:
1161 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1162 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1163 //
test_vadd_vx_i32mf2_m(vbool64_t mask,vint32mf2_t maskedoff,vint32mf2_t op1,int32_t op2,size_t vl)1164 vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
1165   return vadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
1166 }
1167 
1168 //
1169 // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m(
1170 // CHECK-RV64-NEXT:  entry:
1171 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1172 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1173 //
test_vadd_vv_i32m1_m(vbool32_t mask,vint32m1_t maskedoff,vint32m1_t op1,vint32m1_t op2,size_t vl)1174 vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
1175   return vadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
1176 }
1177 
1178 //
1179 // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m(
1180 // CHECK-RV64-NEXT:  entry:
1181 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1182 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1183 //
test_vadd_vx_i32m1_m(vbool32_t mask,vint32m1_t maskedoff,vint32m1_t op1,int32_t op2,size_t vl)1184 vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
1185   return vadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
1186 }
1187 
1188 //
1189 // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m(
1190 // CHECK-RV64-NEXT:  entry:
1191 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1192 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1193 //
test_vadd_vv_i32m2_m(vbool16_t mask,vint32m2_t maskedoff,vint32m2_t op1,vint32m2_t op2,size_t vl)1194 vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
1195   return vadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
1196 }
1197 
1198 //
1199 // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m(
1200 // CHECK-RV64-NEXT:  entry:
1201 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1202 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1203 //
test_vadd_vx_i32m2_m(vbool16_t mask,vint32m2_t maskedoff,vint32m2_t op1,int32_t op2,size_t vl)1204 vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
1205   return vadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
1206 }
1207 
1208 //
1209 // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m(
1210 // CHECK-RV64-NEXT:  entry:
1211 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1212 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1213 //
test_vadd_vv_i32m4_m(vbool8_t mask,vint32m4_t maskedoff,vint32m4_t op1,vint32m4_t op2,size_t vl)1214 vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
1215   return vadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
1216 }
1217 
1218 //
1219 // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m(
1220 // CHECK-RV64-NEXT:  entry:
1221 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1222 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1223 //
test_vadd_vx_i32m4_m(vbool8_t mask,vint32m4_t maskedoff,vint32m4_t op1,int32_t op2,size_t vl)1224 vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
1225   return vadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
1226 }
1227 
1228 //
1229 // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m(
1230 // CHECK-RV64-NEXT:  entry:
1231 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1232 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1233 //
test_vadd_vv_i32m8_m(vbool4_t mask,vint32m8_t maskedoff,vint32m8_t op1,vint32m8_t op2,size_t vl)1234 vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
1235   return vadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
1236 }
1237 
1238 //
1239 // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m(
1240 // CHECK-RV64-NEXT:  entry:
1241 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1242 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1243 //
test_vadd_vx_i32m8_m(vbool4_t mask,vint32m8_t maskedoff,vint32m8_t op1,int32_t op2,size_t vl)1244 vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
1245   return vadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
1246 }
1247 
1248 //
1249 // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m(
1250 // CHECK-RV64-NEXT:  entry:
1251 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1252 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1253 //
test_vadd_vv_i64m1_m(vbool64_t mask,vint64m1_t maskedoff,vint64m1_t op1,vint64m1_t op2,size_t vl)1254 vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
1255   return vadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
1256 }
1257 
1258 //
1259 // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m(
1260 // CHECK-RV64-NEXT:  entry:
1261 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1262 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1263 //
test_vadd_vx_i64m1_m(vbool64_t mask,vint64m1_t maskedoff,vint64m1_t op1,int64_t op2,size_t vl)1264 vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
1265   return vadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
1266 }
1267 
1268 //
1269 // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m(
1270 // CHECK-RV64-NEXT:  entry:
1271 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1272 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1273 //
test_vadd_vv_i64m2_m(vbool32_t mask,vint64m2_t maskedoff,vint64m2_t op1,vint64m2_t op2,size_t vl)1274 vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
1275   return vadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
1276 }
1277 
1278 //
1279 // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m(
1280 // CHECK-RV64-NEXT:  entry:
1281 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1282 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1283 //
test_vadd_vx_i64m2_m(vbool32_t mask,vint64m2_t maskedoff,vint64m2_t op1,int64_t op2,size_t vl)1284 vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
1285   return vadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
1286 }
1287 
1288 //
1289 // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m(
1290 // CHECK-RV64-NEXT:  entry:
1291 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1292 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1293 //
test_vadd_vv_i64m4_m(vbool16_t mask,vint64m4_t maskedoff,vint64m4_t op1,vint64m4_t op2,size_t vl)1294 vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
1295   return vadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
1296 }
1297 
1298 //
1299 // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m(
1300 // CHECK-RV64-NEXT:  entry:
1301 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1302 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1303 //
test_vadd_vx_i64m4_m(vbool16_t mask,vint64m4_t maskedoff,vint64m4_t op1,int64_t op2,size_t vl)1304 vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
1305   return vadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
1306 }
1307 
1308 //
1309 // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m(
1310 // CHECK-RV64-NEXT:  entry:
1311 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1312 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1313 //
test_vadd_vv_i64m8_m(vbool8_t mask,vint64m8_t maskedoff,vint64m8_t op1,vint64m8_t op2,size_t vl)1314 vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
1315   return vadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
1316 }
1317 
1318 //
1319 // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m(
1320 // CHECK-RV64-NEXT:  entry:
1321 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1322 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1323 //
test_vadd_vx_i64m8_m(vbool8_t mask,vint64m8_t maskedoff,vint64m8_t op1,int64_t op2,size_t vl)1324 vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
1325   return vadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
1326 }
1327 
1328 //
1329 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m(
1330 // CHECK-RV64-NEXT:  entry:
1331 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1332 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1333 //
test_vadd_vv_u8mf8_m(vbool64_t mask,vuint8mf8_t maskedoff,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)1334 vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
1335   return vadd_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
1336 }
1337 
1338 //
1339 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m(
1340 // CHECK-RV64-NEXT:  entry:
1341 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1342 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1343 //
test_vadd_vx_u8mf8_m(vbool64_t mask,vuint8mf8_t maskedoff,vuint8mf8_t op1,uint8_t op2,size_t vl)1344 vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
1345   return vadd_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
1346 }
1347 
1348 //
1349 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m(
1350 // CHECK-RV64-NEXT:  entry:
1351 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1352 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1353 //
test_vadd_vv_u8mf4_m(vbool32_t mask,vuint8mf4_t maskedoff,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)1354 vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
1355   return vadd_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
1356 }
1357 
1358 //
1359 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m(
1360 // CHECK-RV64-NEXT:  entry:
1361 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1362 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1363 //
test_vadd_vx_u8mf4_m(vbool32_t mask,vuint8mf4_t maskedoff,vuint8mf4_t op1,uint8_t op2,size_t vl)1364 vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
1365   return vadd_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
1366 }
1367 
1368 //
1369 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m(
1370 // CHECK-RV64-NEXT:  entry:
1371 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1372 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1373 //
test_vadd_vv_u8mf2_m(vbool16_t mask,vuint8mf2_t maskedoff,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)1374 vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
1375   return vadd_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
1376 }
1377 
1378 //
1379 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m(
1380 // CHECK-RV64-NEXT:  entry:
1381 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1382 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1383 //
test_vadd_vx_u8mf2_m(vbool16_t mask,vuint8mf2_t maskedoff,vuint8mf2_t op1,uint8_t op2,size_t vl)1384 vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
1385   return vadd_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
1386 }
1387 
1388 //
1389 // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m(
1390 // CHECK-RV64-NEXT:  entry:
1391 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1392 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1393 //
test_vadd_vv_u8m1_m(vbool8_t mask,vuint8m1_t maskedoff,vuint8m1_t op1,vuint8m1_t op2,size_t vl)1394 vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
1395   return vadd_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
1396 }
1397 
1398 //
1399 // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m(
1400 // CHECK-RV64-NEXT:  entry:
1401 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1402 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1403 //
test_vadd_vx_u8m1_m(vbool8_t mask,vuint8m1_t maskedoff,vuint8m1_t op1,uint8_t op2,size_t vl)1404 vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
1405   return vadd_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
1406 }
1407 
1408 //
1409 // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m(
1410 // CHECK-RV64-NEXT:  entry:
1411 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1412 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1413 //
test_vadd_vv_u8m2_m(vbool4_t mask,vuint8m2_t maskedoff,vuint8m2_t op1,vuint8m2_t op2,size_t vl)1414 vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
1415   return vadd_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
1416 }
1417 
1418 //
1419 // CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m(
1420 // CHECK-RV64-NEXT:  entry:
1421 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1422 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1423 //
test_vadd_vx_u8m2_m(vbool4_t mask,vuint8m2_t maskedoff,vuint8m2_t op1,uint8_t op2,size_t vl)1424 vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
1425   return vadd_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
1426 }
1427 
1428 //
1429 // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m(
1430 // CHECK-RV64-NEXT:  entry:
1431 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1432 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1433 //
test_vadd_vv_u8m4_m(vbool2_t mask,vuint8m4_t maskedoff,vuint8m4_t op1,vuint8m4_t op2,size_t vl)1434 vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
1435   return vadd_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
1436 }
1437 
1438 //
1439 // CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m(
1440 // CHECK-RV64-NEXT:  entry:
1441 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1442 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1443 //
test_vadd_vx_u8m4_m(vbool2_t mask,vuint8m4_t maskedoff,vuint8m4_t op1,uint8_t op2,size_t vl)1444 vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
1445   return vadd_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
1446 }
1447 
1448 //
1449 // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m(
1450 // CHECK-RV64-NEXT:  entry:
1451 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1452 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1453 //
test_vadd_vv_u8m8_m(vbool1_t mask,vuint8m8_t maskedoff,vuint8m8_t op1,vuint8m8_t op2,size_t vl)1454 vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
1455   return vadd_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
1456 }
1457 
1458 //
1459 // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m(
1460 // CHECK-RV64-NEXT:  entry:
1461 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1462 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1463 //
test_vadd_vx_u8m8_m(vbool1_t mask,vuint8m8_t maskedoff,vuint8m8_t op1,uint8_t op2,size_t vl)1464 vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
1465   return vadd_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
1466 }
1467 
1468 //
1469 // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m(
1470 // CHECK-RV64-NEXT:  entry:
1471 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1472 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1473 //
test_vadd_vv_u16mf4_m(vbool64_t mask,vuint16mf4_t maskedoff,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)1474 vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
1475   return vadd_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
1476 }
1477 
1478 //
1479 // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m(
1480 // CHECK-RV64-NEXT:  entry:
1481 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1482 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1483 //
test_vadd_vx_u16mf4_m(vbool64_t mask,vuint16mf4_t maskedoff,vuint16mf4_t op1,uint16_t op2,size_t vl)1484 vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
1485   return vadd_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
1486 }
1487 
1488 //
1489 // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m(
1490 // CHECK-RV64-NEXT:  entry:
1491 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1492 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1493 //
test_vadd_vv_u16mf2_m(vbool32_t mask,vuint16mf2_t maskedoff,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)1494 vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
1495   return vadd_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
1496 }
1497 
1498 //
1499 // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m(
1500 // CHECK-RV64-NEXT:  entry:
1501 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1502 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1503 //
test_vadd_vx_u16mf2_m(vbool32_t mask,vuint16mf2_t maskedoff,vuint16mf2_t op1,uint16_t op2,size_t vl)1504 vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
1505   return vadd_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
1506 }
1507 
1508 //
1509 // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m(
1510 // CHECK-RV64-NEXT:  entry:
1511 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1512 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1513 //
test_vadd_vv_u16m1_m(vbool16_t mask,vuint16m1_t maskedoff,vuint16m1_t op1,vuint16m1_t op2,size_t vl)1514 vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
1515   return vadd_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
1516 }
1517 
1518 //
1519 // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m(
1520 // CHECK-RV64-NEXT:  entry:
1521 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1522 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1523 //
test_vadd_vx_u16m1_m(vbool16_t mask,vuint16m1_t maskedoff,vuint16m1_t op1,uint16_t op2,size_t vl)1524 vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
1525   return vadd_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
1526 }
1527 
1528 //
1529 // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m(
1530 // CHECK-RV64-NEXT:  entry:
1531 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1532 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1533 //
test_vadd_vv_u16m2_m(vbool8_t mask,vuint16m2_t maskedoff,vuint16m2_t op1,vuint16m2_t op2,size_t vl)1534 vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
1535   return vadd_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
1536 }
1537 
1538 //
1539 // CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m(
1540 // CHECK-RV64-NEXT:  entry:
1541 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1542 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1543 //
test_vadd_vx_u16m2_m(vbool8_t mask,vuint16m2_t maskedoff,vuint16m2_t op1,uint16_t op2,size_t vl)1544 vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
1545   return vadd_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
1546 }
1547 
1548 //
1549 // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m(
1550 // CHECK-RV64-NEXT:  entry:
1551 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1552 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1553 //
test_vadd_vv_u16m4_m(vbool4_t mask,vuint16m4_t maskedoff,vuint16m4_t op1,vuint16m4_t op2,size_t vl)1554 vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
1555   return vadd_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
1556 }
1557 
1558 //
1559 // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m(
1560 // CHECK-RV64-NEXT:  entry:
1561 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1562 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1563 //
test_vadd_vx_u16m4_m(vbool4_t mask,vuint16m4_t maskedoff,vuint16m4_t op1,uint16_t op2,size_t vl)1564 vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
1565   return vadd_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
1566 }
1567 
1568 //
1569 // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m(
1570 // CHECK-RV64-NEXT:  entry:
1571 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1572 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1573 //
test_vadd_vv_u16m8_m(vbool2_t mask,vuint16m8_t maskedoff,vuint16m8_t op1,vuint16m8_t op2,size_t vl)1574 vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
1575   return vadd_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
1576 }
1577 
1578 //
1579 // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m(
1580 // CHECK-RV64-NEXT:  entry:
1581 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1582 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1583 //
test_vadd_vx_u16m8_m(vbool2_t mask,vuint16m8_t maskedoff,vuint16m8_t op1,uint16_t op2,size_t vl)1584 vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
1585   return vadd_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
1586 }
1587 
1588 //
1589 // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m(
1590 // CHECK-RV64-NEXT:  entry:
1591 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1592 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1593 //
test_vadd_vv_u32mf2_m(vbool64_t mask,vuint32mf2_t maskedoff,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)1594 vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
1595   return vadd_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
1596 }
1597 
1598 //
1599 // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m(
1600 // CHECK-RV64-NEXT:  entry:
1601 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1602 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1603 //
test_vadd_vx_u32mf2_m(vbool64_t mask,vuint32mf2_t maskedoff,vuint32mf2_t op1,uint32_t op2,size_t vl)1604 vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
1605   return vadd_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
1606 }
1607 
1608 //
1609 // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m(
1610 // CHECK-RV64-NEXT:  entry:
1611 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1612 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1613 //
test_vadd_vv_u32m1_m(vbool32_t mask,vuint32m1_t maskedoff,vuint32m1_t op1,vuint32m1_t op2,size_t vl)1614 vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
1615   return vadd_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
1616 }
1617 
1618 //
1619 // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m(
1620 // CHECK-RV64-NEXT:  entry:
1621 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1622 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1623 //
test_vadd_vx_u32m1_m(vbool32_t mask,vuint32m1_t maskedoff,vuint32m1_t op1,uint32_t op2,size_t vl)1624 vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
1625   return vadd_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
1626 }
1627 
1628 //
1629 // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m(
1630 // CHECK-RV64-NEXT:  entry:
1631 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1632 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1633 //
test_vadd_vv_u32m2_m(vbool16_t mask,vuint32m2_t maskedoff,vuint32m2_t op1,vuint32m2_t op2,size_t vl)1634 vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
1635   return vadd_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
1636 }
1637 
1638 //
1639 // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m(
1640 // CHECK-RV64-NEXT:  entry:
1641 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1642 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1643 //
test_vadd_vx_u32m2_m(vbool16_t mask,vuint32m2_t maskedoff,vuint32m2_t op1,uint32_t op2,size_t vl)1644 vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
1645   return vadd_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
1646 }
1647 
1648 //
1649 // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m(
1650 // CHECK-RV64-NEXT:  entry:
1651 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1652 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1653 //
test_vadd_vv_u32m4_m(vbool8_t mask,vuint32m4_t maskedoff,vuint32m4_t op1,vuint32m4_t op2,size_t vl)1654 vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
1655   return vadd_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
1656 }
1657 
1658 //
1659 // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m(
1660 // CHECK-RV64-NEXT:  entry:
1661 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1662 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1663 //
test_vadd_vx_u32m4_m(vbool8_t mask,vuint32m4_t maskedoff,vuint32m4_t op1,uint32_t op2,size_t vl)1664 vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
1665   return vadd_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
1666 }
1667 
1668 //
1669 // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m(
1670 // CHECK-RV64-NEXT:  entry:
1671 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1672 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1673 //
test_vadd_vv_u32m8_m(vbool4_t mask,vuint32m8_t maskedoff,vuint32m8_t op1,vuint32m8_t op2,size_t vl)1674 vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
1675   return vadd_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
1676 }
1677 
1678 //
1679 // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m(
1680 // CHECK-RV64-NEXT:  entry:
1681 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1682 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1683 //
test_vadd_vx_u32m8_m(vbool4_t mask,vuint32m8_t maskedoff,vuint32m8_t op1,uint32_t op2,size_t vl)1684 vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
1685   return vadd_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
1686 }
1687 
1688 //
1689 // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m(
1690 // CHECK-RV64-NEXT:  entry:
1691 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1692 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1693 //
test_vadd_vv_u64m1_m(vbool64_t mask,vuint64m1_t maskedoff,vuint64m1_t op1,vuint64m1_t op2,size_t vl)1694 vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
1695   return vadd_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
1696 }
1697 
1698 //
1699 // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m(
1700 // CHECK-RV64-NEXT:  entry:
1701 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1702 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1703 //
test_vadd_vx_u64m1_m(vbool64_t mask,vuint64m1_t maskedoff,vuint64m1_t op1,uint64_t op2,size_t vl)1704 vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
1705   return vadd_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
1706 }
1707 
1708 //
1709 // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m(
1710 // CHECK-RV64-NEXT:  entry:
1711 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1712 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1713 //
test_vadd_vv_u64m2_m(vbool32_t mask,vuint64m2_t maskedoff,vuint64m2_t op1,vuint64m2_t op2,size_t vl)1714 vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
1715   return vadd_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
1716 }
1717 
1718 //
1719 // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m(
1720 // CHECK-RV64-NEXT:  entry:
1721 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1722 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1723 //
test_vadd_vx_u64m2_m(vbool32_t mask,vuint64m2_t maskedoff,vuint64m2_t op1,uint64_t op2,size_t vl)1724 vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
1725   return vadd_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
1726 }
1727 
1728 //
1729 // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m(
1730 // CHECK-RV64-NEXT:  entry:
1731 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1732 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1733 //
test_vadd_vv_u64m4_m(vbool16_t mask,vuint64m4_t maskedoff,vuint64m4_t op1,vuint64m4_t op2,size_t vl)1734 vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
1735   return vadd_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
1736 }
1737 
1738 //
1739 // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m(
1740 // CHECK-RV64-NEXT:  entry:
1741 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1742 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1743 //
test_vadd_vx_u64m4_m(vbool16_t mask,vuint64m4_t maskedoff,vuint64m4_t op1,uint64_t op2,size_t vl)1744 vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
1745   return vadd_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
1746 }
1747 
1748 //
1749 // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m(
1750 // CHECK-RV64-NEXT:  entry:
1751 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1752 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1753 //
test_vadd_vv_u64m8_m(vbool8_t mask,vuint64m8_t maskedoff,vuint64m8_t op1,vuint64m8_t op2,size_t vl)1754 vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
1755   return vadd_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
1756 }
1757 
1758 //
1759 // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m(
1760 // CHECK-RV64-NEXT:  entry:
1761 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1762 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1763 //
test_vadd_vx_u64m8_m(vbool8_t mask,vuint64m8_t maskedoff,vuint64m8_t op1,uint64_t op2,size_t vl)1764 vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
1765   return vadd_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
1766 }