// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

7 //
8 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf8(
9 // CHECK-RV64-NEXT: entry:
10 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
11 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
12 //
test_vadc_vvm_i8mf8(vint8mf8_t op1,vint8mf8_t op2,vbool64_t carryin,size_t vl)13 vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
14 vbool64_t carryin, size_t vl) {
15 return vadc_vvm_i8mf8(op1, op2, carryin, vl);
16 }
17
18 //
19 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf8(
20 // CHECK-RV64-NEXT: entry:
21 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
22 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
23 //
test_vadc_vxm_i8mf8(vint8mf8_t op1,int8_t op2,vbool64_t carryin,size_t vl)24 vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin,
25 size_t vl) {
26 return vadc_vxm_i8mf8(op1, op2, carryin, vl);
27 }
28
29 //
30 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf4(
31 // CHECK-RV64-NEXT: entry:
32 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
33 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
34 //
test_vadc_vvm_i8mf4(vint8mf4_t op1,vint8mf4_t op2,vbool32_t carryin,size_t vl)35 vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
36 vbool32_t carryin, size_t vl) {
37 return vadc_vvm_i8mf4(op1, op2, carryin, vl);
38 }
39
40 //
41 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf4(
42 // CHECK-RV64-NEXT: entry:
43 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
44 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
45 //
test_vadc_vxm_i8mf4(vint8mf4_t op1,int8_t op2,vbool32_t carryin,size_t vl)46 vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin,
47 size_t vl) {
48 return vadc_vxm_i8mf4(op1, op2, carryin, vl);
49 }
50
51 //
52 // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf2(
53 // CHECK-RV64-NEXT: entry:
54 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
55 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
56 //
test_vadc_vvm_i8mf2(vint8mf2_t op1,vint8mf2_t op2,vbool16_t carryin,size_t vl)57 vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
58 vbool16_t carryin, size_t vl) {
59 return vadc_vvm_i8mf2(op1, op2, carryin, vl);
60 }
61
62 //
63 // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf2(
64 // CHECK-RV64-NEXT: entry:
65 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
66 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
67 //
test_vadc_vxm_i8mf2(vint8mf2_t op1,int8_t op2,vbool16_t carryin,size_t vl)68 vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin,
69 size_t vl) {
70 return vadc_vxm_i8mf2(op1, op2, carryin, vl);
71 }
72
73 //
74 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m1(
75 // CHECK-RV64-NEXT: entry:
76 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
77 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
78 //
test_vadc_vvm_i8m1(vint8m1_t op1,vint8m1_t op2,vbool8_t carryin,size_t vl)79 vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
80 size_t vl) {
81 return vadc_vvm_i8m1(op1, op2, carryin, vl);
82 }
83
84 //
85 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m1(
86 // CHECK-RV64-NEXT: entry:
87 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
88 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
89 //
test_vadc_vxm_i8m1(vint8m1_t op1,int8_t op2,vbool8_t carryin,size_t vl)90 vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin,
91 size_t vl) {
92 return vadc_vxm_i8m1(op1, op2, carryin, vl);
93 }
94
95 //
96 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m2(
97 // CHECK-RV64-NEXT: entry:
98 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
99 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
100 //
test_vadc_vvm_i8m2(vint8m2_t op1,vint8m2_t op2,vbool4_t carryin,size_t vl)101 vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
102 size_t vl) {
103 return vadc_vvm_i8m2(op1, op2, carryin, vl);
104 }
105
106 //
107 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m2(
108 // CHECK-RV64-NEXT: entry:
109 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
110 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
111 //
test_vadc_vxm_i8m2(vint8m2_t op1,int8_t op2,vbool4_t carryin,size_t vl)112 vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin,
113 size_t vl) {
114 return vadc_vxm_i8m2(op1, op2, carryin, vl);
115 }
116
117 //
118 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m4(
119 // CHECK-RV64-NEXT: entry:
120 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
121 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
122 //
test_vadc_vvm_i8m4(vint8m4_t op1,vint8m4_t op2,vbool2_t carryin,size_t vl)123 vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
124 size_t vl) {
125 return vadc_vvm_i8m4(op1, op2, carryin, vl);
126 }
127
128 //
129 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m4(
130 // CHECK-RV64-NEXT: entry:
131 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
132 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
133 //
test_vadc_vxm_i8m4(vint8m4_t op1,int8_t op2,vbool2_t carryin,size_t vl)134 vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin,
135 size_t vl) {
136 return vadc_vxm_i8m4(op1, op2, carryin, vl);
137 }
138
139 //
140 // CHECK-RV64-LABEL: @test_vadc_vvm_i8m8(
141 // CHECK-RV64-NEXT: entry:
142 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
143 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
144 //
test_vadc_vvm_i8m8(vint8m8_t op1,vint8m8_t op2,vbool1_t carryin,size_t vl)145 vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
146 size_t vl) {
147 return vadc_vvm_i8m8(op1, op2, carryin, vl);
148 }
149
150 //
151 // CHECK-RV64-LABEL: @test_vadc_vxm_i8m8(
152 // CHECK-RV64-NEXT: entry:
153 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
154 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
155 //
test_vadc_vxm_i8m8(vint8m8_t op1,int8_t op2,vbool1_t carryin,size_t vl)156 vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin,
157 size_t vl) {
158 return vadc_vxm_i8m8(op1, op2, carryin, vl);
159 }
160
161 //
162 // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf4(
163 // CHECK-RV64-NEXT: entry:
164 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
165 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
166 //
test_vadc_vvm_i16mf4(vint16mf4_t op1,vint16mf4_t op2,vbool64_t carryin,size_t vl)167 vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
168 vbool64_t carryin, size_t vl) {
169 return vadc_vvm_i16mf4(op1, op2, carryin, vl);
170 }
171
172 //
173 // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf4(
174 // CHECK-RV64-NEXT: entry:
175 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
176 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
177 //
test_vadc_vxm_i16mf4(vint16mf4_t op1,int16_t op2,vbool64_t carryin,size_t vl)178 vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
179 vbool64_t carryin, size_t vl) {
180 return vadc_vxm_i16mf4(op1, op2, carryin, vl);
181 }
182
183 //
184 // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf2(
185 // CHECK-RV64-NEXT: entry:
186 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
187 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
188 //
test_vadc_vvm_i16mf2(vint16mf2_t op1,vint16mf2_t op2,vbool32_t carryin,size_t vl)189 vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
190 vbool32_t carryin, size_t vl) {
191 return vadc_vvm_i16mf2(op1, op2, carryin, vl);
192 }
193
194 //
195 // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf2(
196 // CHECK-RV64-NEXT: entry:
197 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
198 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
199 //
test_vadc_vxm_i16mf2(vint16mf2_t op1,int16_t op2,vbool32_t carryin,size_t vl)200 vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
201 vbool32_t carryin, size_t vl) {
202 return vadc_vxm_i16mf2(op1, op2, carryin, vl);
203 }
204
205 //
206 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m1(
207 // CHECK-RV64-NEXT: entry:
208 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
209 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
210 //
test_vadc_vvm_i16m1(vint16m1_t op1,vint16m1_t op2,vbool16_t carryin,size_t vl)211 vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
212 vbool16_t carryin, size_t vl) {
213 return vadc_vvm_i16m1(op1, op2, carryin, vl);
214 }
215
216 //
217 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m1(
218 // CHECK-RV64-NEXT: entry:
219 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
220 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
221 //
test_vadc_vxm_i16m1(vint16m1_t op1,int16_t op2,vbool16_t carryin,size_t vl)222 vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin,
223 size_t vl) {
224 return vadc_vxm_i16m1(op1, op2, carryin, vl);
225 }
226
227 //
228 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m2(
229 // CHECK-RV64-NEXT: entry:
230 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
231 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
232 //
test_vadc_vvm_i16m2(vint16m2_t op1,vint16m2_t op2,vbool8_t carryin,size_t vl)233 vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin,
234 size_t vl) {
235 return vadc_vvm_i16m2(op1, op2, carryin, vl);
236 }
237
238 //
239 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m2(
240 // CHECK-RV64-NEXT: entry:
241 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
242 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
243 //
test_vadc_vxm_i16m2(vint16m2_t op1,int16_t op2,vbool8_t carryin,size_t vl)244 vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin,
245 size_t vl) {
246 return vadc_vxm_i16m2(op1, op2, carryin, vl);
247 }
248
249 //
250 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m4(
251 // CHECK-RV64-NEXT: entry:
252 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
253 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
254 //
test_vadc_vvm_i16m4(vint16m4_t op1,vint16m4_t op2,vbool4_t carryin,size_t vl)255 vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin,
256 size_t vl) {
257 return vadc_vvm_i16m4(op1, op2, carryin, vl);
258 }
259
260 //
261 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m4(
262 // CHECK-RV64-NEXT: entry:
263 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
264 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
265 //
test_vadc_vxm_i16m4(vint16m4_t op1,int16_t op2,vbool4_t carryin,size_t vl)266 vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
267 size_t vl) {
268 return vadc_vxm_i16m4(op1, op2, carryin, vl);
269 }
270
271 //
272 // CHECK-RV64-LABEL: @test_vadc_vvm_i16m8(
273 // CHECK-RV64-NEXT: entry:
274 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
275 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
276 //
test_vadc_vvm_i16m8(vint16m8_t op1,vint16m8_t op2,vbool2_t carryin,size_t vl)277 vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin,
278 size_t vl) {
279 return vadc_vvm_i16m8(op1, op2, carryin, vl);
280 }
281
282 //
283 // CHECK-RV64-LABEL: @test_vadc_vxm_i16m8(
284 // CHECK-RV64-NEXT: entry:
285 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
286 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
287 //
test_vadc_vxm_i16m8(vint16m8_t op1,int16_t op2,vbool2_t carryin,size_t vl)288 vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin,
289 size_t vl) {
290 return vadc_vxm_i16m8(op1, op2, carryin, vl);
291 }
292
293 //
294 // CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2(
295 // CHECK-RV64-NEXT: entry:
296 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
297 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
298 //
test_vadc_vvm_i32mf2(vint32mf2_t op1,vint32mf2_t op2,vbool64_t carryin,size_t vl)299 vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
300 vbool64_t carryin, size_t vl) {
301 return vadc_vvm_i32mf2(op1, op2, carryin, vl);
302 }
303
304 //
305 // CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2(
306 // CHECK-RV64-NEXT: entry:
307 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
308 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
309 //
test_vadc_vxm_i32mf2(vint32mf2_t op1,int32_t op2,vbool64_t carryin,size_t vl)310 vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
311 vbool64_t carryin, size_t vl) {
312 return vadc_vxm_i32mf2(op1, op2, carryin, vl);
313 }
314
315 //
316 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m1(
317 // CHECK-RV64-NEXT: entry:
318 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
319 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
320 //
test_vadc_vvm_i32m1(vint32m1_t op1,vint32m1_t op2,vbool32_t carryin,size_t vl)321 vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
322 vbool32_t carryin, size_t vl) {
323 return vadc_vvm_i32m1(op1, op2, carryin, vl);
324 }
325
326 //
327 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m1(
328 // CHECK-RV64-NEXT: entry:
329 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
330 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
331 //
test_vadc_vxm_i32m1(vint32m1_t op1,int32_t op2,vbool32_t carryin,size_t vl)332 vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin,
333 size_t vl) {
334 return vadc_vxm_i32m1(op1, op2, carryin, vl);
335 }
336
337 //
338 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m2(
339 // CHECK-RV64-NEXT: entry:
340 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
341 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
342 //
test_vadc_vvm_i32m2(vint32m2_t op1,vint32m2_t op2,vbool16_t carryin,size_t vl)343 vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
344 vbool16_t carryin, size_t vl) {
345 return vadc_vvm_i32m2(op1, op2, carryin, vl);
346 }
347
348 //
349 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m2(
350 // CHECK-RV64-NEXT: entry:
351 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
352 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
353 //
test_vadc_vxm_i32m2(vint32m2_t op1,int32_t op2,vbool16_t carryin,size_t vl)354 vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin,
355 size_t vl) {
356 return vadc_vxm_i32m2(op1, op2, carryin, vl);
357 }
358
359 //
360 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m4(
361 // CHECK-RV64-NEXT: entry:
362 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
363 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
364 //
test_vadc_vvm_i32m4(vint32m4_t op1,vint32m4_t op2,vbool8_t carryin,size_t vl)365 vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin,
366 size_t vl) {
367 return vadc_vvm_i32m4(op1, op2, carryin, vl);
368 }
369
370 //
371 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m4(
372 // CHECK-RV64-NEXT: entry:
373 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
374 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
375 //
test_vadc_vxm_i32m4(vint32m4_t op1,int32_t op2,vbool8_t carryin,size_t vl)376 vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin,
377 size_t vl) {
378 return vadc_vxm_i32m4(op1, op2, carryin, vl);
379 }
380
381 //
382 // CHECK-RV64-LABEL: @test_vadc_vvm_i32m8(
383 // CHECK-RV64-NEXT: entry:
384 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
385 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
386 //
test_vadc_vvm_i32m8(vint32m8_t op1,vint32m8_t op2,vbool4_t carryin,size_t vl)387 vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin,
388 size_t vl) {
389 return vadc_vvm_i32m8(op1, op2, carryin, vl);
390 }
391
392 //
393 // CHECK-RV64-LABEL: @test_vadc_vxm_i32m8(
394 // CHECK-RV64-NEXT: entry:
395 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
396 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
397 //
test_vadc_vxm_i32m8(vint32m8_t op1,int32_t op2,vbool4_t carryin,size_t vl)398 vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin,
399 size_t vl) {
400 return vadc_vxm_i32m8(op1, op2, carryin, vl);
401 }
402
403 //
404 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m1(
405 // CHECK-RV64-NEXT: entry:
406 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
407 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
408 //
test_vadc_vvm_i64m1(vint64m1_t op1,vint64m1_t op2,vbool64_t carryin,size_t vl)409 vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
410 vbool64_t carryin, size_t vl) {
411 return vadc_vvm_i64m1(op1, op2, carryin, vl);
412 }
413
414 //
415 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m1(
416 // CHECK-RV64-NEXT: entry:
417 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
418 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
419 //
test_vadc_vxm_i64m1(vint64m1_t op1,int64_t op2,vbool64_t carryin,size_t vl)420 vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin,
421 size_t vl) {
422 return vadc_vxm_i64m1(op1, op2, carryin, vl);
423 }
424
425 //
426 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m2(
427 // CHECK-RV64-NEXT: entry:
428 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
429 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
430 //
test_vadc_vvm_i64m2(vint64m2_t op1,vint64m2_t op2,vbool32_t carryin,size_t vl)431 vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
432 vbool32_t carryin, size_t vl) {
433 return vadc_vvm_i64m2(op1, op2, carryin, vl);
434 }
435
436 //
437 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m2(
438 // CHECK-RV64-NEXT: entry:
439 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
440 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
441 //
test_vadc_vxm_i64m2(vint64m2_t op1,int64_t op2,vbool32_t carryin,size_t vl)442 vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin,
443 size_t vl) {
444 return vadc_vxm_i64m2(op1, op2, carryin, vl);
445 }
446
447 //
448 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m4(
449 // CHECK-RV64-NEXT: entry:
450 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
451 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
452 //
test_vadc_vvm_i64m4(vint64m4_t op1,vint64m4_t op2,vbool16_t carryin,size_t vl)453 vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
454 vbool16_t carryin, size_t vl) {
455 return vadc_vvm_i64m4(op1, op2, carryin, vl);
456 }
457
458 //
459 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m4(
460 // CHECK-RV64-NEXT: entry:
461 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
462 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
463 //
test_vadc_vxm_i64m4(vint64m4_t op1,int64_t op2,vbool16_t carryin,size_t vl)464 vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin,
465 size_t vl) {
466 return vadc_vxm_i64m4(op1, op2, carryin, vl);
467 }
468
469 //
470 // CHECK-RV64-LABEL: @test_vadc_vvm_i64m8(
471 // CHECK-RV64-NEXT: entry:
472 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
473 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
474 //
test_vadc_vvm_i64m8(vint64m8_t op1,vint64m8_t op2,vbool8_t carryin,size_t vl)475 vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin,
476 size_t vl) {
477 return vadc_vvm_i64m8(op1, op2, carryin, vl);
478 }
479
480 //
481 // CHECK-RV64-LABEL: @test_vadc_vxm_i64m8(
482 // CHECK-RV64-NEXT: entry:
483 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
484 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
485 //
test_vadc_vxm_i64m8(vint64m8_t op1,int64_t op2,vbool8_t carryin,size_t vl)486 vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
487 size_t vl) {
488 return vadc_vxm_i64m8(op1, op2, carryin, vl);
489 }
490
491 //
492 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf8(
493 // CHECK-RV64-NEXT: entry:
494 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
495 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
496 //
test_vadc_vvm_u8mf8(vuint8mf8_t op1,vuint8mf8_t op2,vbool64_t carryin,size_t vl)497 vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
498 vbool64_t carryin, size_t vl) {
499 return vadc_vvm_u8mf8(op1, op2, carryin, vl);
500 }
501
502 //
503 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf8(
504 // CHECK-RV64-NEXT: entry:
505 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
506 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
507 //
test_vadc_vxm_u8mf8(vuint8mf8_t op1,uint8_t op2,vbool64_t carryin,size_t vl)508 vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin,
509 size_t vl) {
510 return vadc_vxm_u8mf8(op1, op2, carryin, vl);
511 }
512
513 //
514 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf4(
515 // CHECK-RV64-NEXT: entry:
516 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
517 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
518 //
test_vadc_vvm_u8mf4(vuint8mf4_t op1,vuint8mf4_t op2,vbool32_t carryin,size_t vl)519 vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
520 vbool32_t carryin, size_t vl) {
521 return vadc_vvm_u8mf4(op1, op2, carryin, vl);
522 }
523
524 //
525 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf4(
526 // CHECK-RV64-NEXT: entry:
527 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
528 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
529 //
test_vadc_vxm_u8mf4(vuint8mf4_t op1,uint8_t op2,vbool32_t carryin,size_t vl)530 vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin,
531 size_t vl) {
532 return vadc_vxm_u8mf4(op1, op2, carryin, vl);
533 }
534
535 //
536 // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf2(
537 // CHECK-RV64-NEXT: entry:
538 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
539 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
540 //
test_vadc_vvm_u8mf2(vuint8mf2_t op1,vuint8mf2_t op2,vbool16_t carryin,size_t vl)541 vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
542 vbool16_t carryin, size_t vl) {
543 return vadc_vvm_u8mf2(op1, op2, carryin, vl);
544 }
545
546 //
547 // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf2(
548 // CHECK-RV64-NEXT: entry:
549 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
550 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
551 //
test_vadc_vxm_u8mf2(vuint8mf2_t op1,uint8_t op2,vbool16_t carryin,size_t vl)552 vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin,
553 size_t vl) {
554 return vadc_vxm_u8mf2(op1, op2, carryin, vl);
555 }
556
557 //
558 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m1(
559 // CHECK-RV64-NEXT: entry:
560 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
561 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
562 //
test_vadc_vvm_u8m1(vuint8m1_t op1,vuint8m1_t op2,vbool8_t carryin,size_t vl)563 vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin,
564 size_t vl) {
565 return vadc_vvm_u8m1(op1, op2, carryin, vl);
566 }
567
568 //
569 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m1(
570 // CHECK-RV64-NEXT: entry:
571 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
572 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
573 //
test_vadc_vxm_u8m1(vuint8m1_t op1,uint8_t op2,vbool8_t carryin,size_t vl)574 vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
575 size_t vl) {
576 return vadc_vxm_u8m1(op1, op2, carryin, vl);
577 }
578
579 //
580 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m2(
581 // CHECK-RV64-NEXT: entry:
582 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
583 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
584 //
test_vadc_vvm_u8m2(vuint8m2_t op1,vuint8m2_t op2,vbool4_t carryin,size_t vl)585 vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin,
586 size_t vl) {
587 return vadc_vvm_u8m2(op1, op2, carryin, vl);
588 }
589
590 //
591 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m2(
592 // CHECK-RV64-NEXT: entry:
593 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
594 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
595 //
// Codegen check: vadc_vxm_u8m2 (vector + scalar uint8_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
                              size_t vl) {
  return vadc_vxm_u8m2(op1, op2, carryin, vl);
}
600
601 //
602 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m4(
603 // CHECK-RV64-NEXT: entry:
604 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
605 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
606 //
// Codegen check: vadc_vvm_u8m4 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin,
                              size_t vl) {
  return vadc_vvm_u8m4(op1, op2, carryin, vl);
}
611
612 //
613 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m4(
614 // CHECK-RV64-NEXT: entry:
615 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
616 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
617 //
// Codegen check: vadc_vxm_u8m4 (vector + scalar uint8_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
                              size_t vl) {
  return vadc_vxm_u8m4(op1, op2, carryin, vl);
}
622
623 //
624 // CHECK-RV64-LABEL: @test_vadc_vvm_u8m8(
625 // CHECK-RV64-NEXT: entry:
626 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
627 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
628 //
// Codegen check: vadc_vvm_u8m8 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin,
                              size_t vl) {
  return vadc_vvm_u8m8(op1, op2, carryin, vl);
}
633
634 //
635 // CHECK-RV64-LABEL: @test_vadc_vxm_u8m8(
636 // CHECK-RV64-NEXT: entry:
637 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
638 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
639 //
// Codegen check: vadc_vxm_u8m8 (vector + scalar uint8_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
                              size_t vl) {
  return vadc_vxm_u8m8(op1, op2, carryin, vl);
}
644
645 //
646 // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf4(
647 // CHECK-RV64-NEXT: entry:
648 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
649 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
650 //
// Codegen check: vadc_vvm_u16mf4 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
                                  vbool64_t carryin, size_t vl) {
  return vadc_vvm_u16mf4(op1, op2, carryin, vl);
}
655
656 //
657 // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf4(
658 // CHECK-RV64-NEXT: entry:
659 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
660 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
661 //
// Codegen check: vadc_vxm_u16mf4 (vector + scalar uint16_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
                                  vbool64_t carryin, size_t vl) {
  return vadc_vxm_u16mf4(op1, op2, carryin, vl);
}
666
667 //
668 // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf2(
669 // CHECK-RV64-NEXT: entry:
670 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
671 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
672 //
// Codegen check: vadc_vvm_u16mf2 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
                                  vbool32_t carryin, size_t vl) {
  return vadc_vvm_u16mf2(op1, op2, carryin, vl);
}
677
678 //
679 // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf2(
680 // CHECK-RV64-NEXT: entry:
681 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
682 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
683 //
// Codegen check: vadc_vxm_u16mf2 (vector + scalar uint16_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
                                  vbool32_t carryin, size_t vl) {
  return vadc_vxm_u16mf2(op1, op2, carryin, vl);
}
688
689 //
690 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m1(
691 // CHECK-RV64-NEXT: entry:
692 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
693 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
694 //
// Codegen check: vadc_vvm_u16m1 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
                                vbool16_t carryin, size_t vl) {
  return vadc_vvm_u16m1(op1, op2, carryin, vl);
}
699
700 //
701 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m1(
702 // CHECK-RV64-NEXT: entry:
703 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
704 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
705 //
// Codegen check: vadc_vxm_u16m1 (vector + scalar uint16_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
                                vbool16_t carryin, size_t vl) {
  return vadc_vxm_u16m1(op1, op2, carryin, vl);
}
710
711 //
712 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m2(
713 // CHECK-RV64-NEXT: entry:
714 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
715 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
716 //
// Codegen check: vadc_vvm_u16m2 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
                                vbool8_t carryin, size_t vl) {
  return vadc_vvm_u16m2(op1, op2, carryin, vl);
}
721
722 //
723 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m2(
724 // CHECK-RV64-NEXT: entry:
725 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
726 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
727 //
// Codegen check: vadc_vxm_u16m2 (vector + scalar uint16_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin,
                                size_t vl) {
  return vadc_vxm_u16m2(op1, op2, carryin, vl);
}
732
733 //
734 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m4(
735 // CHECK-RV64-NEXT: entry:
736 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
737 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
738 //
// Codegen check: vadc_vvm_u16m4 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
                                vbool4_t carryin, size_t vl) {
  return vadc_vvm_u16m4(op1, op2, carryin, vl);
}
743
744 //
745 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m4(
746 // CHECK-RV64-NEXT: entry:
747 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
748 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
749 //
// Codegen check: vadc_vxm_u16m4 (vector + scalar uint16_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin,
                                size_t vl) {
  return vadc_vxm_u16m4(op1, op2, carryin, vl);
}
754
755 //
756 // CHECK-RV64-LABEL: @test_vadc_vvm_u16m8(
757 // CHECK-RV64-NEXT: entry:
758 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
759 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
760 //
// Codegen check: vadc_vvm_u16m8 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
                                vbool2_t carryin, size_t vl) {
  return vadc_vvm_u16m8(op1, op2, carryin, vl);
}
765
766 //
767 // CHECK-RV64-LABEL: @test_vadc_vxm_u16m8(
768 // CHECK-RV64-NEXT: entry:
769 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
770 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
771 //
// Codegen check: vadc_vxm_u16m8 (vector + scalar uint16_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin,
                                size_t vl) {
  return vadc_vxm_u16m8(op1, op2, carryin, vl);
}
776
777 //
778 // CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2(
779 // CHECK-RV64-NEXT: entry:
780 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
781 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
782 //
// Codegen check: vadc_vvm_u32mf2 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
                                  vbool64_t carryin, size_t vl) {
  return vadc_vvm_u32mf2(op1, op2, carryin, vl);
}
787
788 //
789 // CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2(
790 // CHECK-RV64-NEXT: entry:
791 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
792 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
793 //
// Codegen check: vadc_vxm_u32mf2 (vector + scalar uint32_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
                                  vbool64_t carryin, size_t vl) {
  return vadc_vxm_u32mf2(op1, op2, carryin, vl);
}
798
799 //
800 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m1(
801 // CHECK-RV64-NEXT: entry:
802 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
803 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
804 //
// Codegen check: vadc_vvm_u32m1 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
                                vbool32_t carryin, size_t vl) {
  return vadc_vvm_u32m1(op1, op2, carryin, vl);
}
809
810 //
811 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m1(
812 // CHECK-RV64-NEXT: entry:
813 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
814 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
815 //
// Codegen check: vadc_vxm_u32m1 (vector + scalar uint32_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
                                vbool32_t carryin, size_t vl) {
  return vadc_vxm_u32m1(op1, op2, carryin, vl);
}
820
821 //
822 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m2(
823 // CHECK-RV64-NEXT: entry:
824 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
825 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
826 //
// Codegen check: vadc_vvm_u32m2 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
                                vbool16_t carryin, size_t vl) {
  return vadc_vvm_u32m2(op1, op2, carryin, vl);
}
831
832 //
833 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m2(
834 // CHECK-RV64-NEXT: entry:
835 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
836 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
837 //
// Codegen check: vadc_vxm_u32m2 (vector + scalar uint32_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
                                vbool16_t carryin, size_t vl) {
  return vadc_vxm_u32m2(op1, op2, carryin, vl);
}
842
843 //
844 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m4(
845 // CHECK-RV64-NEXT: entry:
846 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
847 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
848 //
// Codegen check: vadc_vvm_u32m4 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
                                vbool8_t carryin, size_t vl) {
  return vadc_vvm_u32m4(op1, op2, carryin, vl);
}
853
854 //
855 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m4(
856 // CHECK-RV64-NEXT: entry:
857 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
858 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
859 //
// Codegen check: vadc_vxm_u32m4 (vector + scalar uint32_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin,
                                size_t vl) {
  return vadc_vxm_u32m4(op1, op2, carryin, vl);
}
864
865 //
866 // CHECK-RV64-LABEL: @test_vadc_vvm_u32m8(
867 // CHECK-RV64-NEXT: entry:
868 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
869 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
870 //
// Codegen check: vadc_vvm_u32m8 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
                                vbool4_t carryin, size_t vl) {
  return vadc_vvm_u32m8(op1, op2, carryin, vl);
}
875
876 //
877 // CHECK-RV64-LABEL: @test_vadc_vxm_u32m8(
878 // CHECK-RV64-NEXT: entry:
879 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
880 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
881 //
// Codegen check: vadc_vxm_u32m8 (vector + scalar uint32_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin,
                                size_t vl) {
  return vadc_vxm_u32m8(op1, op2, carryin, vl);
}
886
887 //
888 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m1(
889 // CHECK-RV64-NEXT: entry:
890 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
891 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
892 //
// Codegen check: vadc_vvm_u64m1 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
                                vbool64_t carryin, size_t vl) {
  return vadc_vvm_u64m1(op1, op2, carryin, vl);
}
897
898 //
899 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m1(
900 // CHECK-RV64-NEXT: entry:
901 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
902 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
903 //
// Codegen check: vadc_vxm_u64m1 (vector + scalar uint64_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
                                vbool64_t carryin, size_t vl) {
  return vadc_vxm_u64m1(op1, op2, carryin, vl);
}
908
909 //
910 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m2(
911 // CHECK-RV64-NEXT: entry:
912 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
913 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
914 //
// Codegen check: vadc_vvm_u64m2 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
                                vbool32_t carryin, size_t vl) {
  return vadc_vvm_u64m2(op1, op2, carryin, vl);
}
919
920 //
921 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m2(
922 // CHECK-RV64-NEXT: entry:
923 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
924 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
925 //
// Codegen check: vadc_vxm_u64m2 (vector + scalar uint64_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
                                vbool32_t carryin, size_t vl) {
  return vadc_vxm_u64m2(op1, op2, carryin, vl);
}
930
931 //
932 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m4(
933 // CHECK-RV64-NEXT: entry:
934 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
935 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
936 //
// Codegen check: vadc_vvm_u64m4 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
                                vbool16_t carryin, size_t vl) {
  return vadc_vvm_u64m4(op1, op2, carryin, vl);
}
941
942 //
943 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m4(
944 // CHECK-RV64-NEXT: entry:
945 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
946 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
947 //
// Codegen check: vadc_vxm_u64m4 (vector + scalar uint64_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
                                vbool16_t carryin, size_t vl) {
  return vadc_vxm_u64m4(op1, op2, carryin, vl);
}
952
953 //
954 // CHECK-RV64-LABEL: @test_vadc_vvm_u64m8(
955 // CHECK-RV64-NEXT: entry:
956 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
957 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
958 //
// Codegen check: vadc_vvm_u64m8 (vector + vector with carry-in mask) must
// produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
                                vbool8_t carryin, size_t vl) {
  return vadc_vvm_u64m8(op1, op2, carryin, vl);
}
963
964 //
965 // CHECK-RV64-LABEL: @test_vadc_vxm_u64m8(
966 // CHECK-RV64-NEXT: entry:
967 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
968 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
969 //
// Codegen check: vadc_vxm_u64m8 (vector + scalar uint64_t with carry-in mask)
// must produce exactly the IR pinned by the autogenerated CHECK lines above.
vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin,
                                size_t vl) {
  return vadc_vxm_u64m8(op1, op2, carryin, vl);
}
974