// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
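
// This file exercises the vsadd (signed) and vsaddu (unsigned) saturating-add
// intrinsics in vector-vector (vv) and vector-scalar (vx) forms, across all
// element widths and LMULs, unmasked and masked (_m), and checks that each
// lowers to the corresponding @llvm.riscv.vsadd / @llvm.riscv.vsaddu call.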

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vsadd_vv_i8mf8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
  return vsadd_vx_i8mf8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vsadd_vv_i8mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
  return vsadd_vx_i8mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vsadd_vv_i8mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
  return vsadd_vx_i8mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vsadd_vv_i8m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
  return vsadd_vx_i8m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
  return vsadd_vv_i8m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
  return vsadd_vx_i8m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
  return vsadd_vv_i8m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
  return vsadd_vx_i8m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
  return vsadd_vv_i8m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
  return vsadd_vx_i8m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
  return vsadd_vv_i16mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
  return vsadd_vx_i16mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return vsadd_vv_i16mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
  return vsadd_vx_i16mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return vsadd_vv_i16m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
  return vsadd_vx_i16m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
  return vsadd_vv_i16m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
  return vsadd_vx_i16m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
  return vsadd_vv_i16m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
  return vsadd_vx_i16m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
  return vsadd_vv_i16m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
  return vsadd_vx_i16m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
  return vsadd_vv_i32mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
  return vsadd_vx_i32mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vsadd_vv_i32m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
  return vsadd_vx_i32m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
  return vsadd_vv_i32m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
  return vsadd_vx_i32m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
  return vsadd_vv_i32m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
  return vsadd_vx_i32m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
  return vsadd_vv_i32m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
  return vsadd_vx_i32m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vsadd_vv_i64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
  return vsadd_vx_i64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return vsadd_vv_i64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
  return vsadd_vx_i64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return vsadd_vv_i64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
  return vsadd_vx_i64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return vsadd_vv_i64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
  return vsadd_vx_i64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
  return vsaddu_vv_u8mf8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
  return vsaddu_vx_u8mf8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return vsaddu_vv_u8mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return vsaddu_vx_u8mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return vsaddu_vv_u8mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return vsaddu_vx_u8mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vsaddu_vv_u8m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vsaddu_vx_u8m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vsaddu_vv_u8m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
  return vsaddu_vx_u8m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vsaddu_vv_u8m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
  return vsaddu_vx_u8m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return vsaddu_vv_u8m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
  return vsaddu_vx_u8m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
                                   size_t vl) {
  return vsaddu_vv_u16mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
  return vsaddu_vx_u16mf4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
                                   size_t vl) {
  return vsaddu_vv_u16mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return vsaddu_vx_u16mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vsaddu_vv_u16m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
  return vsaddu_vx_u16m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vsaddu_vv_u16m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
  return vsaddu_vx_u16m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return vsaddu_vv_u16m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
  return vsaddu_vx_u16m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return vsaddu_vv_u16m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
  return vsaddu_vx_u16m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
                                   size_t vl) {
  return vsaddu_vv_u32mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
  return vsaddu_vx_u32mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vsaddu_vv_u32m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vsaddu_vx_u32m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return vsaddu_vv_u32m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
  return vsaddu_vx_u32m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return vsaddu_vv_u32m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
  return vsaddu_vx_u32m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return vsaddu_vv_u32m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
  return vsaddu_vx_u32m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vsaddu_vv_u64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vsaddu_vx_u64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vsaddu_vv_u64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vsaddu_vx_u64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vsaddu_vv_u64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vsaddu_vx_u64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vsaddu_vv_u64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vsaddu_vx_u64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
                                 vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vsadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
                                 vint8mf8_t op1, int8_t op2, size_t vl) {
  return vsadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
                                 vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vsadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
                                 vint8mf4_t op1, int8_t op2, size_t vl) {
  return vsadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
                                 vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vsadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
                                 vint8mf2_t op1, int8_t op2, size_t vl) {
  return vsadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
                               vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vsadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
                               vint8m1_t op1, int8_t op2, size_t vl) {
  return vsadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
                               vint8m2_t op1, vint8m2_t op2, size_t vl) {
  return vsadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
                               vint8m2_t op1, int8_t op2, size_t vl) {
  return vsadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
                               vint8m4_t op1, vint8m4_t op2, size_t vl) {
  return vsadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
                               vint8m4_t op1, int8_t op2, size_t vl) {
  return vsadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
                               vint8m8_t op1, vint8m8_t op2, size_t vl) {
  return vsadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
                               vint8m8_t op1, int8_t op2, size_t vl) {
  return vsadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
}

1044 //
1045 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m(
1046 // CHECK-RV64-NEXT:  entry:
1047 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1048 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1049 //
test_vsadd_vv_i16mf4_m(vbool64_t mask,vint16mf4_t maskedoff,vint16mf4_t op1,vint16mf4_t op2,size_t vl)1050 vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
1051                                    vint16mf4_t op1, vint16mf4_t op2,
1052                                    size_t vl) {
1053   return vsadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
1054 }
1055 
1056 //
1057 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m(
1058 // CHECK-RV64-NEXT:  entry:
1059 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1060 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1061 //
1062 vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
1063                                    vint16mf4_t op1, int16_t op2, size_t vl) {
1064   return vsadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
1065 }
1066 
1067 //
1068 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m(
1069 // CHECK-RV64-NEXT:  entry:
1070 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1071 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1072 //
1073 vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
1074                                    vint16mf2_t op1, vint16mf2_t op2,
1075                                    size_t vl) {
1076   return vsadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
1077 }
1078 
1079 //
1080 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m(
1081 // CHECK-RV64-NEXT:  entry:
1082 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1083 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1084 //
1085 vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
1086                                    vint16mf2_t op1, int16_t op2, size_t vl) {
1087   return vsadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
1088 }
1089 
1090 //
1091 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m(
1092 // CHECK-RV64-NEXT:  entry:
1093 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1094 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1095 //
1096 vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
1097                                  vint16m1_t op1, vint16m1_t op2, size_t vl) {
1098   return vsadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
1099 }
1100 
1101 //
1102 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m(
1103 // CHECK-RV64-NEXT:  entry:
1104 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1105 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1106 //
1107 vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
1108                                  vint16m1_t op1, int16_t op2, size_t vl) {
1109   return vsadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
1110 }
1111 
1112 //
1113 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m(
1114 // CHECK-RV64-NEXT:  entry:
1115 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1116 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1117 //
1118 vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
1119                                  vint16m2_t op1, vint16m2_t op2, size_t vl) {
1120   return vsadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
1121 }
1122 
1123 //
1124 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m(
1125 // CHECK-RV64-NEXT:  entry:
1126 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1127 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1128 //
1129 vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
1130                                  vint16m2_t op1, int16_t op2, size_t vl) {
1131   return vsadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
1132 }
1133 
1134 //
1135 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m(
1136 // CHECK-RV64-NEXT:  entry:
1137 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1138 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1139 //
1140 vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
1141                                  vint16m4_t op1, vint16m4_t op2, size_t vl) {
1142   return vsadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
1143 }
1144 
1145 //
1146 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m(
1147 // CHECK-RV64-NEXT:  entry:
1148 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1149 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1150 //
1151 vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
1152                                  vint16m4_t op1, int16_t op2, size_t vl) {
1153   return vsadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
1154 }
1155 
1156 //
1157 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m(
1158 // CHECK-RV64-NEXT:  entry:
1159 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1160 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1161 //
1162 vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
1163                                  vint16m8_t op1, vint16m8_t op2, size_t vl) {
1164   return vsadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
1165 }
1166 
1167 //
1168 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m(
1169 // CHECK-RV64-NEXT:  entry:
1170 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1171 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1172 //
1173 vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
1174                                  vint16m8_t op1, int16_t op2, size_t vl) {
1175   return vsadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
1176 }
1177 
1178 //
1179 // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m(
1180 // CHECK-RV64-NEXT:  entry:
1181 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1182 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1183 //
1184 vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
1185                                    vint32mf2_t op1, vint32mf2_t op2,
1186                                    size_t vl) {
1187   return vsadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
1188 }
1189 
1190 //
1191 // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m(
1192 // CHECK-RV64-NEXT:  entry:
1193 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1194 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1195 //
1196 vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
1197                                    vint32mf2_t op1, int32_t op2, size_t vl) {
1198   return vsadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
1199 }
1200 
1201 //
1202 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m(
1203 // CHECK-RV64-NEXT:  entry:
1204 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1205 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1206 //
1207 vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
1208                                  vint32m1_t op1, vint32m1_t op2, size_t vl) {
1209   return vsadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
1210 }
1211 
1212 //
1213 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m(
1214 // CHECK-RV64-NEXT:  entry:
1215 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1216 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1217 //
1218 vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
1219                                  vint32m1_t op1, int32_t op2, size_t vl) {
1220   return vsadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
1221 }
1222 
1223 //
1224 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m(
1225 // CHECK-RV64-NEXT:  entry:
1226 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1227 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1228 //
1229 vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
1230                                  vint32m2_t op1, vint32m2_t op2, size_t vl) {
1231   return vsadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
1232 }
1233 
1234 //
1235 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m(
1236 // CHECK-RV64-NEXT:  entry:
1237 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1238 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1239 //
1240 vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
1241                                  vint32m2_t op1, int32_t op2, size_t vl) {
1242   return vsadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
1243 }
1244 
1245 //
1246 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m(
1247 // CHECK-RV64-NEXT:  entry:
1248 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1249 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1250 //
1251 vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
1252                                  vint32m4_t op1, vint32m4_t op2, size_t vl) {
1253   return vsadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
1254 }
1255 
1256 //
1257 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m(
1258 // CHECK-RV64-NEXT:  entry:
1259 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1260 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1261 //
1262 vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
1263                                  vint32m4_t op1, int32_t op2, size_t vl) {
1264   return vsadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
1265 }
1266 
1267 //
1268 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m(
1269 // CHECK-RV64-NEXT:  entry:
1270 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1271 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1272 //
1273 vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
1274                                  vint32m8_t op1, vint32m8_t op2, size_t vl) {
1275   return vsadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
1276 }
1277 
1278 //
1279 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m(
1280 // CHECK-RV64-NEXT:  entry:
1281 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1282 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1283 //
1284 vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
1285                                  vint32m8_t op1, int32_t op2, size_t vl) {
1286   return vsadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
1287 }
1288 
1289 //
1290 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m(
1291 // CHECK-RV64-NEXT:  entry:
1292 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1293 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1294 //
1295 vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
1296                                  vint64m1_t op1, vint64m1_t op2, size_t vl) {
1297   return vsadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
1298 }
1299 
1300 //
1301 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m(
1302 // CHECK-RV64-NEXT:  entry:
1303 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1304 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1305 //
1306 vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
1307                                  vint64m1_t op1, int64_t op2, size_t vl) {
1308   return vsadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
1309 }
1310 
1311 //
1312 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m(
1313 // CHECK-RV64-NEXT:  entry:
1314 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1315 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1316 //
1317 vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
1318                                  vint64m2_t op1, vint64m2_t op2, size_t vl) {
1319   return vsadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
1320 }
1321 
1322 //
1323 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m(
1324 // CHECK-RV64-NEXT:  entry:
1325 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1326 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1327 //
1328 vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
1329                                  vint64m2_t op1, int64_t op2, size_t vl) {
1330   return vsadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
1331 }
1332 
1333 //
1334 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m(
1335 // CHECK-RV64-NEXT:  entry:
1336 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1337 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1338 //
1339 vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
1340                                  vint64m4_t op1, vint64m4_t op2, size_t vl) {
1341   return vsadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
1342 }
1343 
1344 //
1345 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m(
1346 // CHECK-RV64-NEXT:  entry:
1347 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1348 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1349 //
1350 vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
1351                                  vint64m4_t op1, int64_t op2, size_t vl) {
1352   return vsadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
1353 }
1354 
1355 //
1356 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m(
1357 // CHECK-RV64-NEXT:  entry:
1358 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1359 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1360 //
1361 vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
1362                                  vint64m8_t op1, vint64m8_t op2, size_t vl) {
1363   return vsadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
1364 }
1365 
1366 //
1367 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m(
1368 // CHECK-RV64-NEXT:  entry:
1369 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1370 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1371 //
1372 vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
1373                                  vint64m8_t op1, int64_t op2, size_t vl) {
1374   return vsadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
1375 }
1376 
1377 //
1378 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m(
1379 // CHECK-RV64-NEXT:  entry:
1380 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1381 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1382 //
1383 vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
1384                                    vuint8mf8_t op1, vuint8mf8_t op2,
1385                                    size_t vl) {
1386   return vsaddu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
1387 }
1388 
1389 //
1390 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m(
1391 // CHECK-RV64-NEXT:  entry:
1392 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1393 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1394 //
1395 vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
1396                                    vuint8mf8_t op1, uint8_t op2, size_t vl) {
1397   return vsaddu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
1398 }
1399 
1400 //
1401 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m(
1402 // CHECK-RV64-NEXT:  entry:
1403 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1404 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1405 //
1406 vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
1407                                    vuint8mf4_t op1, vuint8mf4_t op2,
1408                                    size_t vl) {
1409   return vsaddu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
1410 }
1411 
1412 //
1413 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m(
1414 // CHECK-RV64-NEXT:  entry:
1415 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1416 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1417 //
1418 vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
1419                                    vuint8mf4_t op1, uint8_t op2, size_t vl) {
1420   return vsaddu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
1421 }
1422 
1423 //
1424 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m(
1425 // CHECK-RV64-NEXT:  entry:
1426 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1427 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1428 //
1429 vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
1430                                    vuint8mf2_t op1, vuint8mf2_t op2,
1431                                    size_t vl) {
1432   return vsaddu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
1433 }
1434 
1435 //
1436 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m(
1437 // CHECK-RV64-NEXT:  entry:
1438 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1439 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1440 //
1441 vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
1442                                    vuint8mf2_t op1, uint8_t op2, size_t vl) {
1443   return vsaddu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
1444 }
1445 
1446 //
1447 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m(
1448 // CHECK-RV64-NEXT:  entry:
1449 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1450 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1451 //
1452 vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
1453                                  vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
1454   return vsaddu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
1455 }
1456 
1457 //
1458 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m(
1459 // CHECK-RV64-NEXT:  entry:
1460 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1461 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1462 //
1463 vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
1464                                  vuint8m1_t op1, uint8_t op2, size_t vl) {
1465   return vsaddu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
1466 }
1467 
1468 //
1469 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m(
1470 // CHECK-RV64-NEXT:  entry:
1471 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1472 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1473 //
1474 vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
1475                                  vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
1476   return vsaddu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
1477 }
1478 
1479 //
1480 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m(
1481 // CHECK-RV64-NEXT:  entry:
1482 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1483 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1484 //
1485 vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
1486                                  vuint8m2_t op1, uint8_t op2, size_t vl) {
1487   return vsaddu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
1488 }
1489 
1490 //
1491 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m(
1492 // CHECK-RV64-NEXT:  entry:
1493 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1494 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1495 //
1496 vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
1497                                  vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
1498   return vsaddu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
1499 }
1500 
1501 //
1502 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m(
1503 // CHECK-RV64-NEXT:  entry:
1504 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1505 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1506 //
1507 vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
1508                                  vuint8m4_t op1, uint8_t op2, size_t vl) {
1509   return vsaddu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
1510 }
1511 
1512 //
1513 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m(
1514 // CHECK-RV64-NEXT:  entry:
1515 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1516 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1517 //
1518 vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
1519                                  vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
1520   return vsaddu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
1521 }
1522 
1523 //
1524 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m(
1525 // CHECK-RV64-NEXT:  entry:
1526 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1527 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1528 //
1529 vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
1530                                  vuint8m8_t op1, uint8_t op2, size_t vl) {
1531   return vsaddu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
1532 }
1533 
1534 //
1535 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m(
1536 // CHECK-RV64-NEXT:  entry:
1537 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1538 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1539 //
1540 vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
1541                                      vuint16mf4_t op1, vuint16mf4_t op2,
1542                                      size_t vl) {
1543   return vsaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
1544 }
1545 
1546 //
1547 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m(
1548 // CHECK-RV64-NEXT:  entry:
1549 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1550 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1551 //
1552 vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
1553                                      vuint16mf4_t op1, uint16_t op2,
1554                                      size_t vl) {
1555   return vsaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
1556 }
1557 
1558 //
1559 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m(
1560 // CHECK-RV64-NEXT:  entry:
1561 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1562 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1563 //
1564 vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
1565                                      vuint16mf2_t op1, vuint16mf2_t op2,
1566                                      size_t vl) {
1567   return vsaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
1568 }
1569 
1570 //
1571 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m(
1572 // CHECK-RV64-NEXT:  entry:
1573 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1574 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1575 //
1576 vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
1577                                      vuint16mf2_t op1, uint16_t op2,
1578                                      size_t vl) {
1579   return vsaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
1580 }
1581 
1582 //
1583 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m(
1584 // CHECK-RV64-NEXT:  entry:
1585 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1586 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1587 //
1588 vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
1589                                    vuint16m1_t op1, vuint16m1_t op2,
1590                                    size_t vl) {
1591   return vsaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
1592 }
1593 
1594 //
1595 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m(
1596 // CHECK-RV64-NEXT:  entry:
1597 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1598 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1599 //
1600 vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
1601                                    vuint16m1_t op1, uint16_t op2, size_t vl) {
1602   return vsaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
1603 }
1604 
1605 //
1606 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m(
1607 // CHECK-RV64-NEXT:  entry:
1608 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1609 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1610 //
1611 vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
1612                                    vuint16m2_t op1, vuint16m2_t op2,
1613                                    size_t vl) {
1614   return vsaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
1615 }
1616 
1617 //
1618 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m(
1619 // CHECK-RV64-NEXT:  entry:
1620 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1621 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1622 //
1623 vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
1624                                    vuint16m2_t op1, uint16_t op2, size_t vl) {
1625   return vsaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
1626 }
1627 
1628 //
1629 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m(
1630 // CHECK-RV64-NEXT:  entry:
1631 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1632 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1633 //
1634 vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
1635                                    vuint16m4_t op1, vuint16m4_t op2,
1636                                    size_t vl) {
1637   return vsaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
1638 }
1639 
1640 //
1641 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m(
1642 // CHECK-RV64-NEXT:  entry:
1643 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1644 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1645 //
1646 vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
1647                                    vuint16m4_t op1, uint16_t op2, size_t vl) {
1648   return vsaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
1649 }
1650 
1651 //
1652 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m(
1653 // CHECK-RV64-NEXT:  entry:
1654 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1655 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1656 //
1657 vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
1658                                    vuint16m8_t op1, vuint16m8_t op2,
1659                                    size_t vl) {
1660   return vsaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
1661 }
1662 
1663 //
1664 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m(
1665 // CHECK-RV64-NEXT:  entry:
1666 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1667 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1668 //
1669 vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
1670                                    vuint16m8_t op1, uint16_t op2, size_t vl) {
1671   return vsaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
1672 }
1673 
1674 //
1675 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m(
1676 // CHECK-RV64-NEXT:  entry:
1677 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1678 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1679 //
1680 vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
1681                                      vuint32mf2_t op1, vuint32mf2_t op2,
1682                                      size_t vl) {
1683   return vsaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
1684 }
1685 
1686 //
1687 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m(
1688 // CHECK-RV64-NEXT:  entry:
1689 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1690 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1691 //
1692 vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
1693                                      vuint32mf2_t op1, uint32_t op2,
1694                                      size_t vl) {
1695   return vsaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
1696 }
1697 
1698 //
1699 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m(
1700 // CHECK-RV64-NEXT:  entry:
1701 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1702 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1703 //
1704 vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
1705                                    vuint32m1_t op1, vuint32m1_t op2,
1706                                    size_t vl) {
1707   return vsaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
1708 }
1709 
1710 //
1711 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m(
1712 // CHECK-RV64-NEXT:  entry:
1713 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1714 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1715 //
1716 vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
1717                                    vuint32m1_t op1, uint32_t op2, size_t vl) {
1718   return vsaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
1719 }
1720 
1721 //
1722 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m(
1723 // CHECK-RV64-NEXT:  entry:
1724 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1725 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1726 //
1727 vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
1728                                    vuint32m2_t op1, vuint32m2_t op2,
1729                                    size_t vl) {
1730   return vsaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
1731 }
1732 
1733 //
1734 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m(
1735 // CHECK-RV64-NEXT:  entry:
1736 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1737 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1738 //
1739 vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
1740                                    vuint32m2_t op1, uint32_t op2, size_t vl) {
1741   return vsaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
1742 }
1743 
1744 //
1745 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m(
1746 // CHECK-RV64-NEXT:  entry:
1747 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1748 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1749 //
1750 vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
1751                                    vuint32m4_t op1, vuint32m4_t op2,
1752                                    size_t vl) {
1753   return vsaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
1754 }
1755 
1756 //
1757 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m(
1758 // CHECK-RV64-NEXT:  entry:
1759 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1760 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1761 //
1762 vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
1763                                    vuint32m4_t op1, uint32_t op2, size_t vl) {
1764   return vsaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
1765 }
1766 
1767 //
1768 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m(
1769 // CHECK-RV64-NEXT:  entry:
1770 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1771 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1772 //
1773 vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
1774                                    vuint32m8_t op1, vuint32m8_t op2,
1775                                    size_t vl) {
1776   return vsaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
1777 }
1778 
1779 //
1780 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m(
1781 // CHECK-RV64-NEXT:  entry:
1782 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1783 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1784 //
1785 vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
1786                                    vuint32m8_t op1, uint32_t op2, size_t vl) {
1787   return vsaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
1788 }
1789 
1790 //
1791 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m(
1792 // CHECK-RV64-NEXT:  entry:
1793 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1794 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1795 //
1796 vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
1797                                    vuint64m1_t op1, vuint64m1_t op2,
1798                                    size_t vl) {
1799   return vsaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
1800 }
1801 
1802 //
1803 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m(
1804 // CHECK-RV64-NEXT:  entry:
1805 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1806 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1807 //
1808 vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
1809                                    vuint64m1_t op1, uint64_t op2, size_t vl) {
1810   return vsaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
1811 }
1812 
1813 //
1814 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m(
1815 // CHECK-RV64-NEXT:  entry:
1816 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1817 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1818 //
1819 vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
1820                                    vuint64m2_t op1, vuint64m2_t op2,
1821                                    size_t vl) {
1822   return vsaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
1823 }
1824 
1825 //
1826 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m(
1827 // CHECK-RV64-NEXT:  entry:
1828 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1829 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1830 //
1831 vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
1832                                    vuint64m2_t op1, uint64_t op2, size_t vl) {
1833   return vsaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
1834 }
1835 
1836 //
1837 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m(
1838 // CHECK-RV64-NEXT:  entry:
1839 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1840 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1841 //
1842 vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
1843                                    vuint64m4_t op1, vuint64m4_t op2,
1844                                    size_t vl) {
1845   return vsaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
1846 }
1847 
1848 //
1849 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m(
1850 // CHECK-RV64-NEXT:  entry:
1851 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1852 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1853 //
1854 vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
1855                                    vuint64m4_t op1, uint64_t op2, size_t vl) {
1856   return vsaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
1857 }
1858 
1859 //
1860 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m(
1861 // CHECK-RV64-NEXT:  entry:
1862 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1863 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1864 //
1865 vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
1866                                    vuint64m8_t op1, vuint64m8_t op2,
1867                                    size_t vl) {
1868   return vsaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
1869 }
1870 
1871 //
1872 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m(
1873 // CHECK-RV64-NEXT:  entry:
1874 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1875 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1876 //
1877 vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
1878                                    vuint64m8_t op1, uint64_t op2, size_t vl) {
1879   return vsaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
1880 }
1881