1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
4
5 #include <riscv_vector.h>
6
7 //
8 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf8_b64(
9 // CHECK-RV64-NEXT: entry:
10 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
11 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
12 //
test_vmadc_vvm_i8mf8_b64(vint8mf8_t op1,vint8mf8_t op2,vbool64_t carryin,size_t vl)13 vbool64_t test_vmadc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2,
14 vbool64_t carryin, size_t vl) {
15 return vmadc_vvm_i8mf8_b64(op1, op2, carryin, vl);
16 }
17
18 //
19 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf8_b64(
20 // CHECK-RV64-NEXT: entry:
21 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
22 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
23 //
test_vmadc_vxm_i8mf8_b64(vint8mf8_t op1,int8_t op2,vbool64_t carryin,size_t vl)24 vbool64_t test_vmadc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2,
25 vbool64_t carryin, size_t vl) {
26 return vmadc_vxm_i8mf8_b64(op1, op2, carryin, vl);
27 }
28
29 //
30 // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf8_b64(
31 // CHECK-RV64-NEXT: entry:
32 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
33 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
34 //
test_vmadc_vv_i8mf8_b64(vint8mf8_t op1,vint8mf8_t op2,size_t vl)35 vbool64_t test_vmadc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
36 return vmadc_vv_i8mf8_b64(op1, op2, vl);
37 }
38
39 //
40 // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf8_b64(
41 // CHECK-RV64-NEXT: entry:
42 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
43 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
44 //
test_vmadc_vx_i8mf8_b64(vint8mf8_t op1,int8_t op2,size_t vl)45 vbool64_t test_vmadc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
46 return vmadc_vx_i8mf8_b64(op1, op2, vl);
47 }
48
49 //
50 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf4_b32(
51 // CHECK-RV64-NEXT: entry:
52 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
53 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
54 //
test_vmadc_vvm_i8mf4_b32(vint8mf4_t op1,vint8mf4_t op2,vbool32_t carryin,size_t vl)55 vbool32_t test_vmadc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2,
56 vbool32_t carryin, size_t vl) {
57 return vmadc_vvm_i8mf4_b32(op1, op2, carryin, vl);
58 }
59
60 //
61 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf4_b32(
62 // CHECK-RV64-NEXT: entry:
63 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
64 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
65 //
test_vmadc_vxm_i8mf4_b32(vint8mf4_t op1,int8_t op2,vbool32_t carryin,size_t vl)66 vbool32_t test_vmadc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2,
67 vbool32_t carryin, size_t vl) {
68 return vmadc_vxm_i8mf4_b32(op1, op2, carryin, vl);
69 }
70
71 //
72 // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf4_b32(
73 // CHECK-RV64-NEXT: entry:
74 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
75 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
76 //
test_vmadc_vv_i8mf4_b32(vint8mf4_t op1,vint8mf4_t op2,size_t vl)77 vbool32_t test_vmadc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
78 return vmadc_vv_i8mf4_b32(op1, op2, vl);
79 }
80
81 //
82 // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf4_b32(
83 // CHECK-RV64-NEXT: entry:
84 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
85 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
86 //
test_vmadc_vx_i8mf4_b32(vint8mf4_t op1,int8_t op2,size_t vl)87 vbool32_t test_vmadc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
88 return vmadc_vx_i8mf4_b32(op1, op2, vl);
89 }
90
91 //
92 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf2_b16(
93 // CHECK-RV64-NEXT: entry:
94 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
95 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
96 //
test_vmadc_vvm_i8mf2_b16(vint8mf2_t op1,vint8mf2_t op2,vbool16_t carryin,size_t vl)97 vbool16_t test_vmadc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2,
98 vbool16_t carryin, size_t vl) {
99 return vmadc_vvm_i8mf2_b16(op1, op2, carryin, vl);
100 }
101
102 //
103 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf2_b16(
104 // CHECK-RV64-NEXT: entry:
105 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
106 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
107 //
test_vmadc_vxm_i8mf2_b16(vint8mf2_t op1,int8_t op2,vbool16_t carryin,size_t vl)108 vbool16_t test_vmadc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2,
109 vbool16_t carryin, size_t vl) {
110 return vmadc_vxm_i8mf2_b16(op1, op2, carryin, vl);
111 }
112
113 //
114 // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf2_b16(
115 // CHECK-RV64-NEXT: entry:
116 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
117 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
118 //
test_vmadc_vv_i8mf2_b16(vint8mf2_t op1,vint8mf2_t op2,size_t vl)119 vbool16_t test_vmadc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
120 return vmadc_vv_i8mf2_b16(op1, op2, vl);
121 }
122
123 //
124 // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf2_b16(
125 // CHECK-RV64-NEXT: entry:
126 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
127 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
128 //
test_vmadc_vx_i8mf2_b16(vint8mf2_t op1,int8_t op2,size_t vl)129 vbool16_t test_vmadc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
130 return vmadc_vx_i8mf2_b16(op1, op2, vl);
131 }
132
133 //
134 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m1_b8(
135 // CHECK-RV64-NEXT: entry:
136 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
137 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
138 //
test_vmadc_vvm_i8m1_b8(vint8m1_t op1,vint8m1_t op2,vbool8_t carryin,size_t vl)139 vbool8_t test_vmadc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
140 size_t vl) {
141 return vmadc_vvm_i8m1_b8(op1, op2, carryin, vl);
142 }
143
144 //
145 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m1_b8(
146 // CHECK-RV64-NEXT: entry:
147 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
148 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
149 //
test_vmadc_vxm_i8m1_b8(vint8m1_t op1,int8_t op2,vbool8_t carryin,size_t vl)150 vbool8_t test_vmadc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t carryin,
151 size_t vl) {
152 return vmadc_vxm_i8m1_b8(op1, op2, carryin, vl);
153 }
154
155 //
156 // CHECK-RV64-LABEL: @test_vmadc_vv_i8m1_b8(
157 // CHECK-RV64-NEXT: entry:
158 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
159 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
160 //
test_vmadc_vv_i8m1_b8(vint8m1_t op1,vint8m1_t op2,size_t vl)161 vbool8_t test_vmadc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
162 return vmadc_vv_i8m1_b8(op1, op2, vl);
163 }
164
165 //
166 // CHECK-RV64-LABEL: @test_vmadc_vx_i8m1_b8(
167 // CHECK-RV64-NEXT: entry:
168 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
169 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
170 //
test_vmadc_vx_i8m1_b8(vint8m1_t op1,int8_t op2,size_t vl)171 vbool8_t test_vmadc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
172 return vmadc_vx_i8m1_b8(op1, op2, vl);
173 }
174
175 //
176 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m2_b4(
177 // CHECK-RV64-NEXT: entry:
178 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
179 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
180 //
test_vmadc_vvm_i8m2_b4(vint8m2_t op1,vint8m2_t op2,vbool4_t carryin,size_t vl)181 vbool4_t test_vmadc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
182 size_t vl) {
183 return vmadc_vvm_i8m2_b4(op1, op2, carryin, vl);
184 }
185
186 //
187 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m2_b4(
188 // CHECK-RV64-NEXT: entry:
189 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
190 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
191 //
test_vmadc_vxm_i8m2_b4(vint8m2_t op1,int8_t op2,vbool4_t carryin,size_t vl)192 vbool4_t test_vmadc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t carryin,
193 size_t vl) {
194 return vmadc_vxm_i8m2_b4(op1, op2, carryin, vl);
195 }
196
197 //
198 // CHECK-RV64-LABEL: @test_vmadc_vv_i8m2_b4(
199 // CHECK-RV64-NEXT: entry:
200 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
201 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
202 //
test_vmadc_vv_i8m2_b4(vint8m2_t op1,vint8m2_t op2,size_t vl)203 vbool4_t test_vmadc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
204 return vmadc_vv_i8m2_b4(op1, op2, vl);
205 }
206
207 //
208 // CHECK-RV64-LABEL: @test_vmadc_vx_i8m2_b4(
209 // CHECK-RV64-NEXT: entry:
210 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
211 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
212 //
test_vmadc_vx_i8m2_b4(vint8m2_t op1,int8_t op2,size_t vl)213 vbool4_t test_vmadc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
214 return vmadc_vx_i8m2_b4(op1, op2, vl);
215 }
216
217 //
218 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m4_b2(
219 // CHECK-RV64-NEXT: entry:
220 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
221 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
222 //
test_vmadc_vvm_i8m4_b2(vint8m4_t op1,vint8m4_t op2,vbool2_t carryin,size_t vl)223 vbool2_t test_vmadc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
224 size_t vl) {
225 return vmadc_vvm_i8m4_b2(op1, op2, carryin, vl);
226 }
227
228 //
229 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m4_b2(
230 // CHECK-RV64-NEXT: entry:
231 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
232 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
233 //
test_vmadc_vxm_i8m4_b2(vint8m4_t op1,int8_t op2,vbool2_t carryin,size_t vl)234 vbool2_t test_vmadc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t carryin,
235 size_t vl) {
236 return vmadc_vxm_i8m4_b2(op1, op2, carryin, vl);
237 }
238
239 //
240 // CHECK-RV64-LABEL: @test_vmadc_vv_i8m4_b2(
241 // CHECK-RV64-NEXT: entry:
242 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
243 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
244 //
test_vmadc_vv_i8m4_b2(vint8m4_t op1,vint8m4_t op2,size_t vl)245 vbool2_t test_vmadc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
246 return vmadc_vv_i8m4_b2(op1, op2, vl);
247 }
248
249 //
250 // CHECK-RV64-LABEL: @test_vmadc_vx_i8m4_b2(
251 // CHECK-RV64-NEXT: entry:
252 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
253 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
254 //
test_vmadc_vx_i8m4_b2(vint8m4_t op1,int8_t op2,size_t vl)255 vbool2_t test_vmadc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
256 return vmadc_vx_i8m4_b2(op1, op2, vl);
257 }
258
259 //
260 // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m8_b1(
261 // CHECK-RV64-NEXT: entry:
262 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
263 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
264 //
test_vmadc_vvm_i8m8_b1(vint8m8_t op1,vint8m8_t op2,vbool1_t carryin,size_t vl)265 vbool1_t test_vmadc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
266 size_t vl) {
267 return vmadc_vvm_i8m8_b1(op1, op2, carryin, vl);
268 }
269
270 //
271 // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m8_b1(
272 // CHECK-RV64-NEXT: entry:
273 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
274 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
275 //
test_vmadc_vxm_i8m8_b1(vint8m8_t op1,int8_t op2,vbool1_t carryin,size_t vl)276 vbool1_t test_vmadc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t carryin,
277 size_t vl) {
278 return vmadc_vxm_i8m8_b1(op1, op2, carryin, vl);
279 }
280
281 //
282 // CHECK-RV64-LABEL: @test_vmadc_vv_i8m8_b1(
283 // CHECK-RV64-NEXT: entry:
284 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
285 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
286 //
test_vmadc_vv_i8m8_b1(vint8m8_t op1,vint8m8_t op2,size_t vl)287 vbool1_t test_vmadc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
288 return vmadc_vv_i8m8_b1(op1, op2, vl);
289 }
290
291 //
292 // CHECK-RV64-LABEL: @test_vmadc_vx_i8m8_b1(
293 // CHECK-RV64-NEXT: entry:
294 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
295 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
296 //
test_vmadc_vx_i8m8_b1(vint8m8_t op1,int8_t op2,size_t vl)297 vbool1_t test_vmadc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
298 return vmadc_vx_i8m8_b1(op1, op2, vl);
299 }
300
301 //
302 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf4_b64(
303 // CHECK-RV64-NEXT: entry:
304 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
305 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
306 //
test_vmadc_vvm_i16mf4_b64(vint16mf4_t op1,vint16mf4_t op2,vbool64_t carryin,size_t vl)307 vbool64_t test_vmadc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
308 vbool64_t carryin, size_t vl) {
309 return vmadc_vvm_i16mf4_b64(op1, op2, carryin, vl);
310 }
311
312 //
313 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf4_b64(
314 // CHECK-RV64-NEXT: entry:
315 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
316 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
317 //
test_vmadc_vxm_i16mf4_b64(vint16mf4_t op1,int16_t op2,vbool64_t carryin,size_t vl)318 vbool64_t test_vmadc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2,
319 vbool64_t carryin, size_t vl) {
320 return vmadc_vxm_i16mf4_b64(op1, op2, carryin, vl);
321 }
322
323 //
324 // CHECK-RV64-LABEL: @test_vmadc_vv_i16mf4_b64(
325 // CHECK-RV64-NEXT: entry:
326 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
327 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
328 //
test_vmadc_vv_i16mf4_b64(vint16mf4_t op1,vint16mf4_t op2,size_t vl)329 vbool64_t test_vmadc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
330 size_t vl) {
331 return vmadc_vv_i16mf4_b64(op1, op2, vl);
332 }
333
334 //
335 // CHECK-RV64-LABEL: @test_vmadc_vx_i16mf4_b64(
336 // CHECK-RV64-NEXT: entry:
337 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
338 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
339 //
test_vmadc_vx_i16mf4_b64(vint16mf4_t op1,int16_t op2,size_t vl)340 vbool64_t test_vmadc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
341 return vmadc_vx_i16mf4_b64(op1, op2, vl);
342 }
343
344 //
345 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf2_b32(
346 // CHECK-RV64-NEXT: entry:
347 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
348 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
349 //
test_vmadc_vvm_i16mf2_b32(vint16mf2_t op1,vint16mf2_t op2,vbool32_t carryin,size_t vl)350 vbool32_t test_vmadc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
351 vbool32_t carryin, size_t vl) {
352 return vmadc_vvm_i16mf2_b32(op1, op2, carryin, vl);
353 }
354
355 //
356 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf2_b32(
357 // CHECK-RV64-NEXT: entry:
358 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
359 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
360 //
test_vmadc_vxm_i16mf2_b32(vint16mf2_t op1,int16_t op2,vbool32_t carryin,size_t vl)361 vbool32_t test_vmadc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2,
362 vbool32_t carryin, size_t vl) {
363 return vmadc_vxm_i16mf2_b32(op1, op2, carryin, vl);
364 }
365
366 //
367 // CHECK-RV64-LABEL: @test_vmadc_vv_i16mf2_b32(
368 // CHECK-RV64-NEXT: entry:
369 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
370 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
371 //
test_vmadc_vv_i16mf2_b32(vint16mf2_t op1,vint16mf2_t op2,size_t vl)372 vbool32_t test_vmadc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
373 size_t vl) {
374 return vmadc_vv_i16mf2_b32(op1, op2, vl);
375 }
376
377 //
378 // CHECK-RV64-LABEL: @test_vmadc_vx_i16mf2_b32(
379 // CHECK-RV64-NEXT: entry:
380 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
381 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
382 //
test_vmadc_vx_i16mf2_b32(vint16mf2_t op1,int16_t op2,size_t vl)383 vbool32_t test_vmadc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
384 return vmadc_vx_i16mf2_b32(op1, op2, vl);
385 }
386
387 //
388 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m1_b16(
389 // CHECK-RV64-NEXT: entry:
390 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
391 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
392 //
test_vmadc_vvm_i16m1_b16(vint16m1_t op1,vint16m1_t op2,vbool16_t carryin,size_t vl)393 vbool16_t test_vmadc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2,
394 vbool16_t carryin, size_t vl) {
395 return vmadc_vvm_i16m1_b16(op1, op2, carryin, vl);
396 }
397
398 //
399 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m1_b16(
400 // CHECK-RV64-NEXT: entry:
401 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
402 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
403 //
test_vmadc_vxm_i16m1_b16(vint16m1_t op1,int16_t op2,vbool16_t carryin,size_t vl)404 vbool16_t test_vmadc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2,
405 vbool16_t carryin, size_t vl) {
406 return vmadc_vxm_i16m1_b16(op1, op2, carryin, vl);
407 }
408
409 //
410 // CHECK-RV64-LABEL: @test_vmadc_vv_i16m1_b16(
411 // CHECK-RV64-NEXT: entry:
412 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
413 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
414 //
test_vmadc_vv_i16m1_b16(vint16m1_t op1,vint16m1_t op2,size_t vl)415 vbool16_t test_vmadc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
416 return vmadc_vv_i16m1_b16(op1, op2, vl);
417 }
418
419 //
420 // CHECK-RV64-LABEL: @test_vmadc_vx_i16m1_b16(
421 // CHECK-RV64-NEXT: entry:
422 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
423 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
424 //
test_vmadc_vx_i16m1_b16(vint16m1_t op1,int16_t op2,size_t vl)425 vbool16_t test_vmadc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
426 return vmadc_vx_i16m1_b16(op1, op2, vl);
427 }
428
429 //
430 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m2_b8(
431 // CHECK-RV64-NEXT: entry:
432 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
433 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
434 //
test_vmadc_vvm_i16m2_b8(vint16m2_t op1,vint16m2_t op2,vbool8_t carryin,size_t vl)435 vbool8_t test_vmadc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2,
436 vbool8_t carryin, size_t vl) {
437 return vmadc_vvm_i16m2_b8(op1, op2, carryin, vl);
438 }
439
440 //
441 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m2_b8(
442 // CHECK-RV64-NEXT: entry:
443 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
444 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
445 //
test_vmadc_vxm_i16m2_b8(vint16m2_t op1,int16_t op2,vbool8_t carryin,size_t vl)446 vbool8_t test_vmadc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t carryin,
447 size_t vl) {
448 return vmadc_vxm_i16m2_b8(op1, op2, carryin, vl);
449 }
450
451 //
452 // CHECK-RV64-LABEL: @test_vmadc_vv_i16m2_b8(
453 // CHECK-RV64-NEXT: entry:
454 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
455 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
456 //
test_vmadc_vv_i16m2_b8(vint16m2_t op1,vint16m2_t op2,size_t vl)457 vbool8_t test_vmadc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
458 return vmadc_vv_i16m2_b8(op1, op2, vl);
459 }
460
461 //
462 // CHECK-RV64-LABEL: @test_vmadc_vx_i16m2_b8(
463 // CHECK-RV64-NEXT: entry:
464 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
465 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
466 //
test_vmadc_vx_i16m2_b8(vint16m2_t op1,int16_t op2,size_t vl)467 vbool8_t test_vmadc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
468 return vmadc_vx_i16m2_b8(op1, op2, vl);
469 }
470
471 //
472 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m4_b4(
473 // CHECK-RV64-NEXT: entry:
474 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
475 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
476 //
test_vmadc_vvm_i16m4_b4(vint16m4_t op1,vint16m4_t op2,vbool4_t carryin,size_t vl)477 vbool4_t test_vmadc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2,
478 vbool4_t carryin, size_t vl) {
479 return vmadc_vvm_i16m4_b4(op1, op2, carryin, vl);
480 }
481
482 //
483 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m4_b4(
484 // CHECK-RV64-NEXT: entry:
485 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
486 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
487 //
test_vmadc_vxm_i16m4_b4(vint16m4_t op1,int16_t op2,vbool4_t carryin,size_t vl)488 vbool4_t test_vmadc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
489 size_t vl) {
490 return vmadc_vxm_i16m4_b4(op1, op2, carryin, vl);
491 }
492
493 //
494 // CHECK-RV64-LABEL: @test_vmadc_vv_i16m4_b4(
495 // CHECK-RV64-NEXT: entry:
496 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
497 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
498 //
test_vmadc_vv_i16m4_b4(vint16m4_t op1,vint16m4_t op2,size_t vl)499 vbool4_t test_vmadc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
500 return vmadc_vv_i16m4_b4(op1, op2, vl);
501 }
502
503 //
504 // CHECK-RV64-LABEL: @test_vmadc_vx_i16m4_b4(
505 // CHECK-RV64-NEXT: entry:
506 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
507 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
508 //
test_vmadc_vx_i16m4_b4(vint16m4_t op1,int16_t op2,size_t vl)509 vbool4_t test_vmadc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
510 return vmadc_vx_i16m4_b4(op1, op2, vl);
511 }
512
513 //
514 // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m8_b2(
515 // CHECK-RV64-NEXT: entry:
516 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
517 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
518 //
test_vmadc_vvm_i16m8_b2(vint16m8_t op1,vint16m8_t op2,vbool2_t carryin,size_t vl)519 vbool2_t test_vmadc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2,
520 vbool2_t carryin, size_t vl) {
521 return vmadc_vvm_i16m8_b2(op1, op2, carryin, vl);
522 }
523
524 //
525 // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m8_b2(
526 // CHECK-RV64-NEXT: entry:
527 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
528 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
529 //
test_vmadc_vxm_i16m8_b2(vint16m8_t op1,int16_t op2,vbool2_t carryin,size_t vl)530 vbool2_t test_vmadc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t carryin,
531 size_t vl) {
532 return vmadc_vxm_i16m8_b2(op1, op2, carryin, vl);
533 }
534
535 //
536 // CHECK-RV64-LABEL: @test_vmadc_vv_i16m8_b2(
537 // CHECK-RV64-NEXT: entry:
538 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
539 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
540 //
test_vmadc_vv_i16m8_b2(vint16m8_t op1,vint16m8_t op2,size_t vl)541 vbool2_t test_vmadc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
542 return vmadc_vv_i16m8_b2(op1, op2, vl);
543 }
544
545 //
546 // CHECK-RV64-LABEL: @test_vmadc_vx_i16m8_b2(
547 // CHECK-RV64-NEXT: entry:
548 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
549 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
550 //
test_vmadc_vx_i16m8_b2(vint16m8_t op1,int16_t op2,size_t vl)551 vbool2_t test_vmadc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
552 return vmadc_vx_i16m8_b2(op1, op2, vl);
553 }
554
555 //
556 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32mf2_b64(
557 // CHECK-RV64-NEXT: entry:
558 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
559 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
560 //
test_vmadc_vvm_i32mf2_b64(vint32mf2_t op1,vint32mf2_t op2,vbool64_t carryin,size_t vl)561 vbool64_t test_vmadc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
562 vbool64_t carryin, size_t vl) {
563 return vmadc_vvm_i32mf2_b64(op1, op2, carryin, vl);
564 }
565
566 //
567 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32mf2_b64(
568 // CHECK-RV64-NEXT: entry:
569 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
570 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
571 //
test_vmadc_vxm_i32mf2_b64(vint32mf2_t op1,int32_t op2,vbool64_t carryin,size_t vl)572 vbool64_t test_vmadc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2,
573 vbool64_t carryin, size_t vl) {
574 return vmadc_vxm_i32mf2_b64(op1, op2, carryin, vl);
575 }
576
577 //
578 // CHECK-RV64-LABEL: @test_vmadc_vv_i32mf2_b64(
579 // CHECK-RV64-NEXT: entry:
580 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
581 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
582 //
test_vmadc_vv_i32mf2_b64(vint32mf2_t op1,vint32mf2_t op2,size_t vl)583 vbool64_t test_vmadc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
584 size_t vl) {
585 return vmadc_vv_i32mf2_b64(op1, op2, vl);
586 }
587
588 //
589 // CHECK-RV64-LABEL: @test_vmadc_vx_i32mf2_b64(
590 // CHECK-RV64-NEXT: entry:
591 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
592 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
593 //
test_vmadc_vx_i32mf2_b64(vint32mf2_t op1,int32_t op2,size_t vl)594 vbool64_t test_vmadc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
595 return vmadc_vx_i32mf2_b64(op1, op2, vl);
596 }
597
598 //
599 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m1_b32(
600 // CHECK-RV64-NEXT: entry:
601 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
602 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
603 //
test_vmadc_vvm_i32m1_b32(vint32m1_t op1,vint32m1_t op2,vbool32_t carryin,size_t vl)604 vbool32_t test_vmadc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2,
605 vbool32_t carryin, size_t vl) {
606 return vmadc_vvm_i32m1_b32(op1, op2, carryin, vl);
607 }
608
609 //
610 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m1_b32(
611 // CHECK-RV64-NEXT: entry:
612 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
613 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
614 //
test_vmadc_vxm_i32m1_b32(vint32m1_t op1,int32_t op2,vbool32_t carryin,size_t vl)615 vbool32_t test_vmadc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2,
616 vbool32_t carryin, size_t vl) {
617 return vmadc_vxm_i32m1_b32(op1, op2, carryin, vl);
618 }
619
620 //
621 // CHECK-RV64-LABEL: @test_vmadc_vv_i32m1_b32(
622 // CHECK-RV64-NEXT: entry:
623 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
624 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
625 //
test_vmadc_vv_i32m1_b32(vint32m1_t op1,vint32m1_t op2,size_t vl)626 vbool32_t test_vmadc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
627 return vmadc_vv_i32m1_b32(op1, op2, vl);
628 }
629
630 //
631 // CHECK-RV64-LABEL: @test_vmadc_vx_i32m1_b32(
632 // CHECK-RV64-NEXT: entry:
633 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
634 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
635 //
test_vmadc_vx_i32m1_b32(vint32m1_t op1,int32_t op2,size_t vl)636 vbool32_t test_vmadc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
637 return vmadc_vx_i32m1_b32(op1, op2, vl);
638 }
639
640 //
641 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m2_b16(
642 // CHECK-RV64-NEXT: entry:
643 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
644 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
645 //
test_vmadc_vvm_i32m2_b16(vint32m2_t op1,vint32m2_t op2,vbool16_t carryin,size_t vl)646 vbool16_t test_vmadc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2,
647 vbool16_t carryin, size_t vl) {
648 return vmadc_vvm_i32m2_b16(op1, op2, carryin, vl);
649 }
650
651 //
652 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m2_b16(
653 // CHECK-RV64-NEXT: entry:
654 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
655 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
656 //
test_vmadc_vxm_i32m2_b16(vint32m2_t op1,int32_t op2,vbool16_t carryin,size_t vl)657 vbool16_t test_vmadc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2,
658 vbool16_t carryin, size_t vl) {
659 return vmadc_vxm_i32m2_b16(op1, op2, carryin, vl);
660 }
661
662 //
663 // CHECK-RV64-LABEL: @test_vmadc_vv_i32m2_b16(
664 // CHECK-RV64-NEXT: entry:
665 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
666 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
667 //
test_vmadc_vv_i32m2_b16(vint32m2_t op1,vint32m2_t op2,size_t vl)668 vbool16_t test_vmadc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
669 return vmadc_vv_i32m2_b16(op1, op2, vl);
670 }
671
672 //
673 // CHECK-RV64-LABEL: @test_vmadc_vx_i32m2_b16(
674 // CHECK-RV64-NEXT: entry:
675 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
676 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
677 //
test_vmadc_vx_i32m2_b16(vint32m2_t op1,int32_t op2,size_t vl)678 vbool16_t test_vmadc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
679 return vmadc_vx_i32m2_b16(op1, op2, vl);
680 }
681
682 //
683 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m4_b8(
684 // CHECK-RV64-NEXT: entry:
685 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
686 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
687 //
test_vmadc_vvm_i32m4_b8(vint32m4_t op1,vint32m4_t op2,vbool8_t carryin,size_t vl)688 vbool8_t test_vmadc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2,
689 vbool8_t carryin, size_t vl) {
690 return vmadc_vvm_i32m4_b8(op1, op2, carryin, vl);
691 }
692
693 //
694 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m4_b8(
695 // CHECK-RV64-NEXT: entry:
696 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
697 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
698 //
test_vmadc_vxm_i32m4_b8(vint32m4_t op1,int32_t op2,vbool8_t carryin,size_t vl)699 vbool8_t test_vmadc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t carryin,
700 size_t vl) {
701 return vmadc_vxm_i32m4_b8(op1, op2, carryin, vl);
702 }
703
704 //
705 // CHECK-RV64-LABEL: @test_vmadc_vv_i32m4_b8(
706 // CHECK-RV64-NEXT: entry:
707 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
708 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
709 //
test_vmadc_vv_i32m4_b8(vint32m4_t op1,vint32m4_t op2,size_t vl)710 vbool8_t test_vmadc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
711 return vmadc_vv_i32m4_b8(op1, op2, vl);
712 }
713
714 //
715 // CHECK-RV64-LABEL: @test_vmadc_vx_i32m4_b8(
716 // CHECK-RV64-NEXT: entry:
717 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
718 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
719 //
test_vmadc_vx_i32m4_b8(vint32m4_t op1,int32_t op2,size_t vl)720 vbool8_t test_vmadc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
721 return vmadc_vx_i32m4_b8(op1, op2, vl);
722 }
723
724 //
725 // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m8_b4(
726 // CHECK-RV64-NEXT: entry:
727 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
728 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
729 //
test_vmadc_vvm_i32m8_b4(vint32m8_t op1,vint32m8_t op2,vbool4_t carryin,size_t vl)730 vbool4_t test_vmadc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2,
731 vbool4_t carryin, size_t vl) {
732 return vmadc_vvm_i32m8_b4(op1, op2, carryin, vl);
733 }
734
735 //
736 // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m8_b4(
737 // CHECK-RV64-NEXT: entry:
738 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
739 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
740 //
test_vmadc_vxm_i32m8_b4(vint32m8_t op1,int32_t op2,vbool4_t carryin,size_t vl)741 vbool4_t test_vmadc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t carryin,
742 size_t vl) {
743 return vmadc_vxm_i32m8_b4(op1, op2, carryin, vl);
744 }
745
746 //
747 // CHECK-RV64-LABEL: @test_vmadc_vv_i32m8_b4(
748 // CHECK-RV64-NEXT: entry:
749 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
750 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
751 //
test_vmadc_vv_i32m8_b4(vint32m8_t op1,vint32m8_t op2,size_t vl)752 vbool4_t test_vmadc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
753 return vmadc_vv_i32m8_b4(op1, op2, vl);
754 }
755
756 //
757 // CHECK-RV64-LABEL: @test_vmadc_vx_i32m8_b4(
758 // CHECK-RV64-NEXT: entry:
759 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
760 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
761 //
test_vmadc_vx_i32m8_b4(vint32m8_t op1,int32_t op2,size_t vl)762 vbool4_t test_vmadc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
763 return vmadc_vx_i32m8_b4(op1, op2, vl);
764 }
765
766 //
767 // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m1_b64(
768 // CHECK-RV64-NEXT: entry:
769 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
770 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
771 //
test_vmadc_vvm_i64m1_b64(vint64m1_t op1,vint64m1_t op2,vbool64_t carryin,size_t vl)772 vbool64_t test_vmadc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2,
773 vbool64_t carryin, size_t vl) {
774 return vmadc_vvm_i64m1_b64(op1, op2, carryin, vl);
775 }
776
777 //
778 // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m1_b64(
779 // CHECK-RV64-NEXT: entry:
780 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
781 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
782 //
test_vmadc_vxm_i64m1_b64(vint64m1_t op1,int64_t op2,vbool64_t carryin,size_t vl)783 vbool64_t test_vmadc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2,
784 vbool64_t carryin, size_t vl) {
785 return vmadc_vxm_i64m1_b64(op1, op2, carryin, vl);
786 }
787
788 //
789 // CHECK-RV64-LABEL: @test_vmadc_vv_i64m1_b64(
790 // CHECK-RV64-NEXT: entry:
791 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
792 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
793 //
test_vmadc_vv_i64m1_b64(vint64m1_t op1,vint64m1_t op2,size_t vl)794 vbool64_t test_vmadc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
795 return vmadc_vv_i64m1_b64(op1, op2, vl);
796 }
797
798 //
799 // CHECK-RV64-LABEL: @test_vmadc_vx_i64m1_b64(
800 // CHECK-RV64-NEXT: entry:
801 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
802 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
803 //
test_vmadc_vx_i64m1_b64(vint64m1_t op1,int64_t op2,size_t vl)804 vbool64_t test_vmadc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
805 return vmadc_vx_i64m1_b64(op1, op2, vl);
806 }
807
808 //
809 // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m2_b32(
810 // CHECK-RV64-NEXT: entry:
811 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
812 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
813 //
test_vmadc_vvm_i64m2_b32(vint64m2_t op1,vint64m2_t op2,vbool32_t carryin,size_t vl)814 vbool32_t test_vmadc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2,
815 vbool32_t carryin, size_t vl) {
816 return vmadc_vvm_i64m2_b32(op1, op2, carryin, vl);
817 }
818
819 //
820 // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m2_b32(
821 // CHECK-RV64-NEXT: entry:
822 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
823 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
824 //
test_vmadc_vxm_i64m2_b32(vint64m2_t op1,int64_t op2,vbool32_t carryin,size_t vl)825 vbool32_t test_vmadc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2,
826 vbool32_t carryin, size_t vl) {
827 return vmadc_vxm_i64m2_b32(op1, op2, carryin, vl);
828 }
829
830 //
831 // CHECK-RV64-LABEL: @test_vmadc_vv_i64m2_b32(
832 // CHECK-RV64-NEXT: entry:
833 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
834 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
835 //
test_vmadc_vv_i64m2_b32(vint64m2_t op1,vint64m2_t op2,size_t vl)836 vbool32_t test_vmadc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
837 return vmadc_vv_i64m2_b32(op1, op2, vl);
838 }
839
840 //
841 // CHECK-RV64-LABEL: @test_vmadc_vx_i64m2_b32(
842 // CHECK-RV64-NEXT: entry:
843 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
844 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
845 //
test_vmadc_vx_i64m2_b32(vint64m2_t op1,int64_t op2,size_t vl)846 vbool32_t test_vmadc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
847 return vmadc_vx_i64m2_b32(op1, op2, vl);
848 }
849
850 //
851 // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m4_b16(
852 // CHECK-RV64-NEXT: entry:
853 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
854 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
855 //
test_vmadc_vvm_i64m4_b16(vint64m4_t op1,vint64m4_t op2,vbool16_t carryin,size_t vl)856 vbool16_t test_vmadc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2,
857 vbool16_t carryin, size_t vl) {
858 return vmadc_vvm_i64m4_b16(op1, op2, carryin, vl);
859 }
860
861 //
862 // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m4_b16(
863 // CHECK-RV64-NEXT: entry:
864 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
865 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
866 //
test_vmadc_vxm_i64m4_b16(vint64m4_t op1,int64_t op2,vbool16_t carryin,size_t vl)867 vbool16_t test_vmadc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2,
868 vbool16_t carryin, size_t vl) {
869 return vmadc_vxm_i64m4_b16(op1, op2, carryin, vl);
870 }
871
872 //
873 // CHECK-RV64-LABEL: @test_vmadc_vv_i64m4_b16(
874 // CHECK-RV64-NEXT: entry:
875 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
876 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
877 //
test_vmadc_vv_i64m4_b16(vint64m4_t op1,vint64m4_t op2,size_t vl)878 vbool16_t test_vmadc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
879 return vmadc_vv_i64m4_b16(op1, op2, vl);
880 }
881
882 //
883 // CHECK-RV64-LABEL: @test_vmadc_vx_i64m4_b16(
884 // CHECK-RV64-NEXT: entry:
885 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
886 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
887 //
test_vmadc_vx_i64m4_b16(vint64m4_t op1,int64_t op2,size_t vl)888 vbool16_t test_vmadc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
889 return vmadc_vx_i64m4_b16(op1, op2, vl);
890 }
891
892 //
893 // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m8_b8(
894 // CHECK-RV64-NEXT: entry:
895 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
896 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
897 //
test_vmadc_vvm_i64m8_b8(vint64m8_t op1,vint64m8_t op2,vbool8_t carryin,size_t vl)898 vbool8_t test_vmadc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2,
899 vbool8_t carryin, size_t vl) {
900 return vmadc_vvm_i64m8_b8(op1, op2, carryin, vl);
901 }
902
903 //
904 // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m8_b8(
905 // CHECK-RV64-NEXT: entry:
906 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
907 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
908 //
test_vmadc_vxm_i64m8_b8(vint64m8_t op1,int64_t op2,vbool8_t carryin,size_t vl)909 vbool8_t test_vmadc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
910 size_t vl) {
911 return vmadc_vxm_i64m8_b8(op1, op2, carryin, vl);
912 }
913
914 //
915 // CHECK-RV64-LABEL: @test_vmadc_vv_i64m8_b8(
916 // CHECK-RV64-NEXT: entry:
917 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
918 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
919 //
test_vmadc_vv_i64m8_b8(vint64m8_t op1,vint64m8_t op2,size_t vl)920 vbool8_t test_vmadc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
921 return vmadc_vv_i64m8_b8(op1, op2, vl);
922 }
923
924 //
925 // CHECK-RV64-LABEL: @test_vmadc_vx_i64m8_b8(
926 // CHECK-RV64-NEXT: entry:
927 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
928 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
929 //
test_vmadc_vx_i64m8_b8(vint64m8_t op1,int64_t op2,size_t vl)930 vbool8_t test_vmadc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
931 return vmadc_vx_i64m8_b8(op1, op2, vl);
932 }
933
934 //
935 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf8_b64(
936 // CHECK-RV64-NEXT: entry:
937 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
938 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
939 //
test_vmadc_vvm_u8mf8_b64(vuint8mf8_t op1,vuint8mf8_t op2,vbool64_t carryin,size_t vl)940 vbool64_t test_vmadc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
941 vbool64_t carryin, size_t vl) {
942 return vmadc_vvm_u8mf8_b64(op1, op2, carryin, vl);
943 }
944
945 //
946 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf8_b64(
947 // CHECK-RV64-NEXT: entry:
948 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
949 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
950 //
test_vmadc_vxm_u8mf8_b64(vuint8mf8_t op1,uint8_t op2,vbool64_t carryin,size_t vl)951 vbool64_t test_vmadc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2,
952 vbool64_t carryin, size_t vl) {
953 return vmadc_vxm_u8mf8_b64(op1, op2, carryin, vl);
954 }
955
956 //
957 // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf8_b64(
958 // CHECK-RV64-NEXT: entry:
959 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
960 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
961 //
test_vmadc_vv_u8mf8_b64(vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)962 vbool64_t test_vmadc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
963 return vmadc_vv_u8mf8_b64(op1, op2, vl);
964 }
965
966 //
967 // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf8_b64(
968 // CHECK-RV64-NEXT: entry:
969 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
970 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
971 //
test_vmadc_vx_u8mf8_b64(vuint8mf8_t op1,uint8_t op2,size_t vl)972 vbool64_t test_vmadc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
973 return vmadc_vx_u8mf8_b64(op1, op2, vl);
974 }
975
976 //
977 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf4_b32(
978 // CHECK-RV64-NEXT: entry:
979 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
980 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
981 //
test_vmadc_vvm_u8mf4_b32(vuint8mf4_t op1,vuint8mf4_t op2,vbool32_t carryin,size_t vl)982 vbool32_t test_vmadc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
983 vbool32_t carryin, size_t vl) {
984 return vmadc_vvm_u8mf4_b32(op1, op2, carryin, vl);
985 }
986
987 //
988 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf4_b32(
989 // CHECK-RV64-NEXT: entry:
990 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
991 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
992 //
test_vmadc_vxm_u8mf4_b32(vuint8mf4_t op1,uint8_t op2,vbool32_t carryin,size_t vl)993 vbool32_t test_vmadc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2,
994 vbool32_t carryin, size_t vl) {
995 return vmadc_vxm_u8mf4_b32(op1, op2, carryin, vl);
996 }
997
998 //
999 // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf4_b32(
1000 // CHECK-RV64-NEXT: entry:
1001 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1002 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1003 //
test_vmadc_vv_u8mf4_b32(vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)1004 vbool32_t test_vmadc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
1005 return vmadc_vv_u8mf4_b32(op1, op2, vl);
1006 }
1007
1008 //
1009 // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf4_b32(
1010 // CHECK-RV64-NEXT: entry:
1011 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1012 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1013 //
test_vmadc_vx_u8mf4_b32(vuint8mf4_t op1,uint8_t op2,size_t vl)1014 vbool32_t test_vmadc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
1015 return vmadc_vx_u8mf4_b32(op1, op2, vl);
1016 }
1017
1018 //
1019 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf2_b16(
1020 // CHECK-RV64-NEXT: entry:
1021 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1022 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1023 //
test_vmadc_vvm_u8mf2_b16(vuint8mf2_t op1,vuint8mf2_t op2,vbool16_t carryin,size_t vl)1024 vbool16_t test_vmadc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
1025 vbool16_t carryin, size_t vl) {
1026 return vmadc_vvm_u8mf2_b16(op1, op2, carryin, vl);
1027 }
1028
1029 //
1030 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf2_b16(
1031 // CHECK-RV64-NEXT: entry:
1032 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1033 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1034 //
test_vmadc_vxm_u8mf2_b16(vuint8mf2_t op1,uint8_t op2,vbool16_t carryin,size_t vl)1035 vbool16_t test_vmadc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2,
1036 vbool16_t carryin, size_t vl) {
1037 return vmadc_vxm_u8mf2_b16(op1, op2, carryin, vl);
1038 }
1039
1040 //
1041 // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf2_b16(
1042 // CHECK-RV64-NEXT: entry:
1043 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1044 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1045 //
test_vmadc_vv_u8mf2_b16(vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)1046 vbool16_t test_vmadc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
1047 return vmadc_vv_u8mf2_b16(op1, op2, vl);
1048 }
1049
1050 //
1051 // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf2_b16(
1052 // CHECK-RV64-NEXT: entry:
1053 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1054 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1055 //
test_vmadc_vx_u8mf2_b16(vuint8mf2_t op1,uint8_t op2,size_t vl)1056 vbool16_t test_vmadc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
1057 return vmadc_vx_u8mf2_b16(op1, op2, vl);
1058 }
1059
1060 //
1061 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m1_b8(
1062 // CHECK-RV64-NEXT: entry:
1063 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1064 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1065 //
test_vmadc_vvm_u8m1_b8(vuint8m1_t op1,vuint8m1_t op2,vbool8_t carryin,size_t vl)1066 vbool8_t test_vmadc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2,
1067 vbool8_t carryin, size_t vl) {
1068 return vmadc_vvm_u8m1_b8(op1, op2, carryin, vl);
1069 }
1070
1071 //
1072 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m1_b8(
1073 // CHECK-RV64-NEXT: entry:
1074 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1075 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1076 //
test_vmadc_vxm_u8m1_b8(vuint8m1_t op1,uint8_t op2,vbool8_t carryin,size_t vl)1077 vbool8_t test_vmadc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
1078 size_t vl) {
1079 return vmadc_vxm_u8m1_b8(op1, op2, carryin, vl);
1080 }
1081
1082 //
1083 // CHECK-RV64-LABEL: @test_vmadc_vv_u8m1_b8(
1084 // CHECK-RV64-NEXT: entry:
1085 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1086 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1087 //
test_vmadc_vv_u8m1_b8(vuint8m1_t op1,vuint8m1_t op2,size_t vl)1088 vbool8_t test_vmadc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
1089 return vmadc_vv_u8m1_b8(op1, op2, vl);
1090 }
1091
1092 //
1093 // CHECK-RV64-LABEL: @test_vmadc_vx_u8m1_b8(
1094 // CHECK-RV64-NEXT: entry:
1095 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1096 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1097 //
test_vmadc_vx_u8m1_b8(vuint8m1_t op1,uint8_t op2,size_t vl)1098 vbool8_t test_vmadc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
1099 return vmadc_vx_u8m1_b8(op1, op2, vl);
1100 }
1101
1102 //
1103 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m2_b4(
1104 // CHECK-RV64-NEXT: entry:
1105 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1106 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1107 //
test_vmadc_vvm_u8m2_b4(vuint8m2_t op1,vuint8m2_t op2,vbool4_t carryin,size_t vl)1108 vbool4_t test_vmadc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2,
1109 vbool4_t carryin, size_t vl) {
1110 return vmadc_vvm_u8m2_b4(op1, op2, carryin, vl);
1111 }
1112
1113 //
1114 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m2_b4(
1115 // CHECK-RV64-NEXT: entry:
1116 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1117 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1118 //
test_vmadc_vxm_u8m2_b4(vuint8m2_t op1,uint8_t op2,vbool4_t carryin,size_t vl)1119 vbool4_t test_vmadc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
1120 size_t vl) {
1121 return vmadc_vxm_u8m2_b4(op1, op2, carryin, vl);
1122 }
1123
1124 //
1125 // CHECK-RV64-LABEL: @test_vmadc_vv_u8m2_b4(
1126 // CHECK-RV64-NEXT: entry:
1127 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1128 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1129 //
test_vmadc_vv_u8m2_b4(vuint8m2_t op1,vuint8m2_t op2,size_t vl)1130 vbool4_t test_vmadc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
1131 return vmadc_vv_u8m2_b4(op1, op2, vl);
1132 }
1133
1134 //
1135 // CHECK-RV64-LABEL: @test_vmadc_vx_u8m2_b4(
1136 // CHECK-RV64-NEXT: entry:
1137 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1138 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1139 //
test_vmadc_vx_u8m2_b4(vuint8m2_t op1,uint8_t op2,size_t vl)1140 vbool4_t test_vmadc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
1141 return vmadc_vx_u8m2_b4(op1, op2, vl);
1142 }
1143
1144 //
1145 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m4_b2(
1146 // CHECK-RV64-NEXT: entry:
1147 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1148 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1149 //
test_vmadc_vvm_u8m4_b2(vuint8m4_t op1,vuint8m4_t op2,vbool2_t carryin,size_t vl)1150 vbool2_t test_vmadc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2,
1151 vbool2_t carryin, size_t vl) {
1152 return vmadc_vvm_u8m4_b2(op1, op2, carryin, vl);
1153 }
1154
1155 //
1156 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m4_b2(
1157 // CHECK-RV64-NEXT: entry:
1158 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1159 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1160 //
test_vmadc_vxm_u8m4_b2(vuint8m4_t op1,uint8_t op2,vbool2_t carryin,size_t vl)1161 vbool2_t test_vmadc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
1162 size_t vl) {
1163 return vmadc_vxm_u8m4_b2(op1, op2, carryin, vl);
1164 }
1165
1166 //
1167 // CHECK-RV64-LABEL: @test_vmadc_vv_u8m4_b2(
1168 // CHECK-RV64-NEXT: entry:
1169 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1170 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1171 //
test_vmadc_vv_u8m4_b2(vuint8m4_t op1,vuint8m4_t op2,size_t vl)1172 vbool2_t test_vmadc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
1173 return vmadc_vv_u8m4_b2(op1, op2, vl);
1174 }
1175
1176 //
1177 // CHECK-RV64-LABEL: @test_vmadc_vx_u8m4_b2(
1178 // CHECK-RV64-NEXT: entry:
1179 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1180 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1181 //
test_vmadc_vx_u8m4_b2(vuint8m4_t op1,uint8_t op2,size_t vl)1182 vbool2_t test_vmadc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
1183 return vmadc_vx_u8m4_b2(op1, op2, vl);
1184 }
1185
1186 //
1187 // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m8_b1(
1188 // CHECK-RV64-NEXT: entry:
1189 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1190 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
1191 //
// Carry-out of vector + vector + carry-in (vmadc.vvm), u8m8 -> vbool1_t mask.
vbool1_t test_vmadc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2,
                                vbool1_t carryin, size_t vl) {
  return vmadc_vvm_u8m8_b1(op1, op2, carryin, vl);
}
1196
1197 //
1198 // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m8_b1(
1199 // CHECK-RV64-NEXT: entry:
1200 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1201 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
1202 //
// Carry-out of vector + scalar + carry-in (vmadc.vxm), u8m8 -> vbool1_t mask.
vbool1_t test_vmadc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
                                size_t vl) {
  return vmadc_vxm_u8m8_b1(op1, op2, carryin, vl);
}
1207
1208 //
1209 // CHECK-RV64-LABEL: @test_vmadc_vv_u8m8_b1(
1210 // CHECK-RV64-NEXT: entry:
1211 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1212 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
1213 //
// Carry-out of vector + vector (vmadc.vv, no carry-in), u8m8 -> vbool1_t mask.
vbool1_t test_vmadc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return vmadc_vv_u8m8_b1(op1, op2, vl);
}
1217
1218 //
1219 // CHECK-RV64-LABEL: @test_vmadc_vx_u8m8_b1(
1220 // CHECK-RV64-NEXT: entry:
1221 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
1222 // CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
1223 //
// Carry-out of vector + scalar (vmadc.vx, no carry-in), u8m8 -> vbool1_t mask.
vbool1_t test_vmadc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
  return vmadc_vx_u8m8_b1(op1, op2, vl);
}
1227
1228 //
1229 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf4_b64(
1230 // CHECK-RV64-NEXT: entry:
1231 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1232 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1233 //
// Carry-out of vector + vector + carry-in (vmadc.vvm), u16mf4 -> vbool64_t mask.
vbool64_t test_vmadc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
                                    vbool64_t carryin, size_t vl) {
  return vmadc_vvm_u16mf4_b64(op1, op2, carryin, vl);
}
1238
1239 //
1240 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf4_b64(
1241 // CHECK-RV64-NEXT: entry:
1242 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1243 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1244 //
// Carry-out of vector + scalar + carry-in (vmadc.vxm), u16mf4 -> vbool64_t mask.
vbool64_t test_vmadc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2,
                                    vbool64_t carryin, size_t vl) {
  return vmadc_vxm_u16mf4_b64(op1, op2, carryin, vl);
}
1249
1250 //
1251 // CHECK-RV64-LABEL: @test_vmadc_vv_u16mf4_b64(
1252 // CHECK-RV64-NEXT: entry:
1253 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1254 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1255 //
// Carry-out of vector + vector (vmadc.vv, no carry-in), u16mf4 -> vbool64_t mask.
vbool64_t test_vmadc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
                                   size_t vl) {
  return vmadc_vv_u16mf4_b64(op1, op2, vl);
}
1260
1261 //
1262 // CHECK-RV64-LABEL: @test_vmadc_vx_u16mf4_b64(
1263 // CHECK-RV64-NEXT: entry:
1264 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1265 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1266 //
// Carry-out of vector + scalar (vmadc.vx, no carry-in), u16mf4 -> vbool64_t mask.
vbool64_t test_vmadc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
  return vmadc_vx_u16mf4_b64(op1, op2, vl);
}
1270
1271 //
1272 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf2_b32(
1273 // CHECK-RV64-NEXT: entry:
1274 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1275 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1276 //
// Carry-out of vector + vector + carry-in (vmadc.vvm), u16mf2 -> vbool32_t mask.
vbool32_t test_vmadc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
                                    vbool32_t carryin, size_t vl) {
  return vmadc_vvm_u16mf2_b32(op1, op2, carryin, vl);
}
1281
1282 //
1283 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf2_b32(
1284 // CHECK-RV64-NEXT: entry:
1285 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1286 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1287 //
// Carry-out of vector + scalar + carry-in (vmadc.vxm), u16mf2 -> vbool32_t mask.
vbool32_t test_vmadc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2,
                                    vbool32_t carryin, size_t vl) {
  return vmadc_vxm_u16mf2_b32(op1, op2, carryin, vl);
}
1292
1293 //
1294 // CHECK-RV64-LABEL: @test_vmadc_vv_u16mf2_b32(
1295 // CHECK-RV64-NEXT: entry:
1296 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1297 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1298 //
// Carry-out of vector + vector (vmadc.vv, no carry-in), u16mf2 -> vbool32_t mask.
vbool32_t test_vmadc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
                                   size_t vl) {
  return vmadc_vv_u16mf2_b32(op1, op2, vl);
}
1303
1304 //
1305 // CHECK-RV64-LABEL: @test_vmadc_vx_u16mf2_b32(
1306 // CHECK-RV64-NEXT: entry:
1307 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1308 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1309 //
// Carry-out of vector + scalar (vmadc.vx, no carry-in), u16mf2 -> vbool32_t mask.
vbool32_t test_vmadc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return vmadc_vx_u16mf2_b32(op1, op2, vl);
}
1313
1314 //
1315 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m1_b16(
1316 // CHECK-RV64-NEXT: entry:
1317 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1318 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1319 //
// Carry-out of vector + vector + carry-in (vmadc.vvm), u16m1 -> vbool16_t mask.
vbool16_t test_vmadc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
                                   vbool16_t carryin, size_t vl) {
  return vmadc_vvm_u16m1_b16(op1, op2, carryin, vl);
}
1324
1325 //
1326 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m1_b16(
1327 // CHECK-RV64-NEXT: entry:
1328 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1329 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1330 //
// Carry-out of vector + scalar + carry-in (vmadc.vxm), u16m1 -> vbool16_t mask.
vbool16_t test_vmadc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2,
                                   vbool16_t carryin, size_t vl) {
  return vmadc_vxm_u16m1_b16(op1, op2, carryin, vl);
}
1335
1336 //
1337 // CHECK-RV64-LABEL: @test_vmadc_vv_u16m1_b16(
1338 // CHECK-RV64-NEXT: entry:
1339 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1340 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1341 //
// Carry-out of vector + vector (vmadc.vv, no carry-in), u16m1 -> vbool16_t mask.
vbool16_t test_vmadc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vmadc_vv_u16m1_b16(op1, op2, vl);
}
1345
1346 //
1347 // CHECK-RV64-LABEL: @test_vmadc_vx_u16m1_b16(
1348 // CHECK-RV64-NEXT: entry:
1349 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1350 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1351 //
// Carry-out of vector + scalar (vmadc.vx, no carry-in), u16m1 -> vbool16_t mask.
vbool16_t test_vmadc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
  return vmadc_vx_u16m1_b16(op1, op2, vl);
}
1355
1356 //
1357 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m2_b8(
1358 // CHECK-RV64-NEXT: entry:
1359 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1360 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1361 //
// Carry-out of vector + vector + carry-in (vmadc.vvm), u16m2 -> vbool8_t mask.
vbool8_t test_vmadc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2,
                                 vbool8_t carryin, size_t vl) {
  return vmadc_vvm_u16m2_b8(op1, op2, carryin, vl);
}
1366
1367 //
1368 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m2_b8(
1369 // CHECK-RV64-NEXT: entry:
1370 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1371 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1372 //
// Carry-out of vector + scalar + carry-in (vmadc.vxm), u16m2 -> vbool8_t mask.
vbool8_t test_vmadc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2,
                                 vbool8_t carryin, size_t vl) {
  return vmadc_vxm_u16m2_b8(op1, op2, carryin, vl);
}
1377
1378 //
1379 // CHECK-RV64-LABEL: @test_vmadc_vv_u16m2_b8(
1380 // CHECK-RV64-NEXT: entry:
1381 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1382 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1383 //
// Carry-out of vector + vector (vmadc.vv, no carry-in), u16m2 -> vbool8_t mask.
vbool8_t test_vmadc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vmadc_vv_u16m2_b8(op1, op2, vl);
}
1387
1388 //
1389 // CHECK-RV64-LABEL: @test_vmadc_vx_u16m2_b8(
1390 // CHECK-RV64-NEXT: entry:
1391 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1392 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1393 //
// Carry-out of vector + scalar (vmadc.vx, no carry-in), u16m2 -> vbool8_t mask.
vbool8_t test_vmadc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
  return vmadc_vx_u16m2_b8(op1, op2, vl);
}
1397
1398 //
1399 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m4_b4(
1400 // CHECK-RV64-NEXT: entry:
1401 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1402 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1403 //
// Carry-out of vector + vector + carry-in (vmadc.vvm), u16m4 -> vbool4_t mask.
vbool4_t test_vmadc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2,
                                 vbool4_t carryin, size_t vl) {
  return vmadc_vvm_u16m4_b4(op1, op2, carryin, vl);
}
1408
1409 //
1410 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m4_b4(
1411 // CHECK-RV64-NEXT: entry:
1412 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1413 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1414 //
// Carry-out of vector + scalar + carry-in (vmadc.vxm), u16m4 -> vbool4_t mask.
vbool4_t test_vmadc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2,
                                 vbool4_t carryin, size_t vl) {
  return vmadc_vxm_u16m4_b4(op1, op2, carryin, vl);
}
1419
1420 //
1421 // CHECK-RV64-LABEL: @test_vmadc_vv_u16m4_b4(
1422 // CHECK-RV64-NEXT: entry:
1423 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1424 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1425 //
// Carry-out of vector + vector (vmadc.vv, no carry-in), u16m4 -> vbool4_t mask.
vbool4_t test_vmadc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return vmadc_vv_u16m4_b4(op1, op2, vl);
}
1429
1430 //
1431 // CHECK-RV64-LABEL: @test_vmadc_vx_u16m4_b4(
1432 // CHECK-RV64-NEXT: entry:
1433 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1434 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1435 //
// Carry-out of vector + scalar (vmadc.vx, no carry-in), u16m4 -> vbool4_t mask.
vbool4_t test_vmadc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
  return vmadc_vx_u16m4_b4(op1, op2, vl);
}
1439
1440 //
1441 // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m8_b2(
1442 // CHECK-RV64-NEXT: entry:
1443 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1444 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1445 //
// Carry-out of vector + vector + carry-in (vmadc.vvm), u16m8 -> vbool2_t mask.
vbool2_t test_vmadc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2,
                                 vbool2_t carryin, size_t vl) {
  return vmadc_vvm_u16m8_b2(op1, op2, carryin, vl);
}
1450
1451 //
1452 // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m8_b2(
1453 // CHECK-RV64-NEXT: entry:
1454 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1455 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1456 //
// Carry-out of vector + scalar + carry-in (vmadc.vxm), u16m8 -> vbool2_t mask.
vbool2_t test_vmadc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2,
                                 vbool2_t carryin, size_t vl) {
  return vmadc_vxm_u16m8_b2(op1, op2, carryin, vl);
}
1461
1462 //
1463 // CHECK-RV64-LABEL: @test_vmadc_vv_u16m8_b2(
1464 // CHECK-RV64-NEXT: entry:
1465 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1466 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1467 //
// Carry-out of vector + vector (vmadc.vv, no carry-in), u16m8 -> vbool2_t mask.
vbool2_t test_vmadc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return vmadc_vv_u16m8_b2(op1, op2, vl);
}
1471
1472 //
1473 // CHECK-RV64-LABEL: @test_vmadc_vx_u16m8_b2(
1474 // CHECK-RV64-NEXT: entry:
1475 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1476 // CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
1477 //
// Carry-out of vector + scalar (vmadc.vx, no carry-in), u16m8 -> vbool2_t mask.
vbool2_t test_vmadc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
  return vmadc_vx_u16m8_b2(op1, op2, vl);
}
1481
1482 //
1483 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32mf2_b64(
1484 // CHECK-RV64-NEXT: entry:
1485 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1486 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1487 //
// Carry-out of vector + vector + carry-in (vmadc.vvm), u32mf2 -> vbool64_t mask.
vbool64_t test_vmadc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
                                    vbool64_t carryin, size_t vl) {
  return vmadc_vvm_u32mf2_b64(op1, op2, carryin, vl);
}
1492
1493 //
1494 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32mf2_b64(
1495 // CHECK-RV64-NEXT: entry:
1496 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1497 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1498 //
// Carry-out of vector + scalar + carry-in (vmadc.vxm), u32mf2 -> vbool64_t mask.
vbool64_t test_vmadc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2,
                                    vbool64_t carryin, size_t vl) {
  return vmadc_vxm_u32mf2_b64(op1, op2, carryin, vl);
}
1503
1504 //
1505 // CHECK-RV64-LABEL: @test_vmadc_vv_u32mf2_b64(
1506 // CHECK-RV64-NEXT: entry:
1507 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1508 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1509 //
// Carry-out of vector + vector (vmadc.vv, no carry-in), u32mf2 -> vbool64_t mask.
vbool64_t test_vmadc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
                                   size_t vl) {
  return vmadc_vv_u32mf2_b64(op1, op2, vl);
}
1514
1515 //
1516 // CHECK-RV64-LABEL: @test_vmadc_vx_u32mf2_b64(
1517 // CHECK-RV64-NEXT: entry:
1518 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1519 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1520 //
// Carry-out of vector + scalar (vmadc.vx, no carry-in), u32mf2 -> vbool64_t mask.
vbool64_t test_vmadc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
  return vmadc_vx_u32mf2_b64(op1, op2, vl);
}
1524
1525 //
1526 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m1_b32(
1527 // CHECK-RV64-NEXT: entry:
1528 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1529 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1530 //
// Carry-out of vector + vector + carry-in (vmadc.vvm), u32m1 -> vbool32_t mask.
vbool32_t test_vmadc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
                                   vbool32_t carryin, size_t vl) {
  return vmadc_vvm_u32m1_b32(op1, op2, carryin, vl);
}
1535
1536 //
1537 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m1_b32(
1538 // CHECK-RV64-NEXT: entry:
1539 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1540 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1541 //
// Carry-out of vector + scalar + carry-in (vmadc.vxm), u32m1 -> vbool32_t mask.
vbool32_t test_vmadc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2,
                                   vbool32_t carryin, size_t vl) {
  return vmadc_vxm_u32m1_b32(op1, op2, carryin, vl);
}
1546
1547 //
1548 // CHECK-RV64-LABEL: @test_vmadc_vv_u32m1_b32(
1549 // CHECK-RV64-NEXT: entry:
1550 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1551 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1552 //
// Carry-out of vector + vector (vmadc.vv, no carry-in), u32m1 -> vbool32_t mask.
vbool32_t test_vmadc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vmadc_vv_u32m1_b32(op1, op2, vl);
}
1556
1557 //
1558 // CHECK-RV64-LABEL: @test_vmadc_vx_u32m1_b32(
1559 // CHECK-RV64-NEXT: entry:
1560 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1561 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1562 //
// Carry-out of vector + scalar (vmadc.vx, no carry-in), u32m1 -> vbool32_t mask.
vbool32_t test_vmadc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vmadc_vx_u32m1_b32(op1, op2, vl);
}
1566
1567 //
1568 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m2_b16(
1569 // CHECK-RV64-NEXT: entry:
1570 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1571 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1572 //
// Carry-out of vector + vector + carry-in (vmadc.vvm), u32m2 -> vbool16_t mask.
vbool16_t test_vmadc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
                                   vbool16_t carryin, size_t vl) {
  return vmadc_vvm_u32m2_b16(op1, op2, carryin, vl);
}
1577
1578 //
1579 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m2_b16(
1580 // CHECK-RV64-NEXT: entry:
1581 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1582 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1583 //
// Carry-out of vector + scalar + carry-in (vmadc.vxm), u32m2 -> vbool16_t mask.
vbool16_t test_vmadc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2,
                                   vbool16_t carryin, size_t vl) {
  return vmadc_vxm_u32m2_b16(op1, op2, carryin, vl);
}
1588
1589 //
1590 // CHECK-RV64-LABEL: @test_vmadc_vv_u32m2_b16(
1591 // CHECK-RV64-NEXT: entry:
1592 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1593 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1594 //
// Carry-out of vector + vector (vmadc.vv, no carry-in), u32m2 -> vbool16_t mask.
vbool16_t test_vmadc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return vmadc_vv_u32m2_b16(op1, op2, vl);
}
1598
1599 //
1600 // CHECK-RV64-LABEL: @test_vmadc_vx_u32m2_b16(
1601 // CHECK-RV64-NEXT: entry:
1602 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1603 // CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
1604 //
// Carry-out of vector + scalar (vmadc.vx, no carry-in), u32m2 -> vbool16_t mask.
vbool16_t test_vmadc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
  return vmadc_vx_u32m2_b16(op1, op2, vl);
}
1608
1609 //
1610 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m4_b8(
1611 // CHECK-RV64-NEXT: entry:
1612 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1613 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1614 //
// Carry-out of vector + vector + carry-in (vmadc.vvm), u32m4 -> vbool8_t mask.
vbool8_t test_vmadc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2,
                                 vbool8_t carryin, size_t vl) {
  return vmadc_vvm_u32m4_b8(op1, op2, carryin, vl);
}
1619
1620 //
1621 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m4_b8(
1622 // CHECK-RV64-NEXT: entry:
1623 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1624 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1625 //
// Carry-out of vector + scalar + carry-in (vmadc.vxm), u32m4 -> vbool8_t mask.
vbool8_t test_vmadc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2,
                                 vbool8_t carryin, size_t vl) {
  return vmadc_vxm_u32m4_b8(op1, op2, carryin, vl);
}
1630
1631 //
1632 // CHECK-RV64-LABEL: @test_vmadc_vv_u32m4_b8(
1633 // CHECK-RV64-NEXT: entry:
1634 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1635 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1636 //
// Carry-out of vector + vector (vmadc.vv, no carry-in), u32m4 -> vbool8_t mask.
vbool8_t test_vmadc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return vmadc_vv_u32m4_b8(op1, op2, vl);
}
1640
1641 //
1642 // CHECK-RV64-LABEL: @test_vmadc_vx_u32m4_b8(
1643 // CHECK-RV64-NEXT: entry:
1644 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1645 // CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
1646 //
// Carry-out of vector + scalar (vmadc.vx, no carry-in), u32m4 -> vbool8_t mask.
vbool8_t test_vmadc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
  return vmadc_vx_u32m4_b8(op1, op2, vl);
}
1650
1651 //
1652 // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m8_b4(
1653 // CHECK-RV64-NEXT: entry:
1654 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1655 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1656 //
// Carry-out of vector + vector + carry-in (vmadc.vvm), u32m8 -> vbool4_t mask.
vbool4_t test_vmadc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2,
                                 vbool4_t carryin, size_t vl) {
  return vmadc_vvm_u32m8_b4(op1, op2, carryin, vl);
}
1661
1662 //
1663 // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m8_b4(
1664 // CHECK-RV64-NEXT: entry:
1665 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1666 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1667 //
// Carry-out of vector + scalar + carry-in (vmadc.vxm), u32m8 -> vbool4_t mask.
vbool4_t test_vmadc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2,
                                 vbool4_t carryin, size_t vl) {
  return vmadc_vxm_u32m8_b4(op1, op2, carryin, vl);
}
1672
1673 //
1674 // CHECK-RV64-LABEL: @test_vmadc_vv_u32m8_b4(
1675 // CHECK-RV64-NEXT: entry:
1676 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1677 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1678 //
// Carry-out of vector + vector (vmadc.vv, no carry-in), u32m8 -> vbool4_t mask.
vbool4_t test_vmadc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return vmadc_vv_u32m8_b4(op1, op2, vl);
}
1682
1683 //
1684 // CHECK-RV64-LABEL: @test_vmadc_vx_u32m8_b4(
1685 // CHECK-RV64-NEXT: entry:
1686 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1687 // CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
1688 //
// Carry-out of vector + scalar (vmadc.vx, no carry-in), u32m8 -> vbool4_t mask.
vbool4_t test_vmadc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
  return vmadc_vx_u32m8_b4(op1, op2, vl);
}
1692
1693 //
1694 // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m1_b64(
1695 // CHECK-RV64-NEXT: entry:
1696 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1697 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1698 //
// Carry-out of vector + vector + carry-in (vmadc.vvm), u64m1 -> vbool64_t mask.
vbool64_t test_vmadc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
                                   vbool64_t carryin, size_t vl) {
  return vmadc_vvm_u64m1_b64(op1, op2, carryin, vl);
}
1703
1704 //
1705 // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m1_b64(
1706 // CHECK-RV64-NEXT: entry:
1707 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1708 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1709 //
// Carry-out of vector + scalar + carry-in (vmadc.vxm), u64m1 -> vbool64_t mask.
vbool64_t test_vmadc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2,
                                   vbool64_t carryin, size_t vl) {
  return vmadc_vxm_u64m1_b64(op1, op2, carryin, vl);
}
1714
1715 //
1716 // CHECK-RV64-LABEL: @test_vmadc_vv_u64m1_b64(
1717 // CHECK-RV64-NEXT: entry:
1718 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
1719 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1720 //
// Carry-out of vector + vector (vmadc.vv, no carry-in), u64m1 -> vbool64_t mask.
vbool64_t test_vmadc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vmadc_vv_u64m1_b64(op1, op2, vl);
}
1724
1725 //
1726 // CHECK-RV64-LABEL: @test_vmadc_vx_u64m1_b64(
1727 // CHECK-RV64-NEXT: entry:
1728 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
1729 // CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
1730 //
// Carry-out of vector + scalar (vmadc.vx, no carry-in), u64m1 -> vbool64_t mask.
vbool64_t test_vmadc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vmadc_vx_u64m1_b64(op1, op2, vl);
}
1734
1735 //
1736 // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m2_b32(
1737 // CHECK-RV64-NEXT: entry:
1738 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1739 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1740 //
// Carry-out of vector + vector + carry-in (vmadc.vvm), u64m2 -> vbool32_t mask.
vbool32_t test_vmadc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
                                   vbool32_t carryin, size_t vl) {
  return vmadc_vvm_u64m2_b32(op1, op2, carryin, vl);
}
1745
1746 //
1747 // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m2_b32(
1748 // CHECK-RV64-NEXT: entry:
1749 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
1750 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1751 //
// Carry-out of vector + scalar + carry-in (vmadc.vxm), u64m2 -> vbool32_t mask.
vbool32_t test_vmadc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2,
                                   vbool32_t carryin, size_t vl) {
  return vmadc_vxm_u64m2_b32(op1, op2, carryin, vl);
}
1756
1757 //
1758 // CHECK-RV64-LABEL: @test_vmadc_vv_u64m2_b32(
1759 // CHECK-RV64-NEXT: entry:
1760 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
1761 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1762 //
// Carry-out of vector + vector (vmadc.vv, no carry-in), u64m2 -> vbool32_t mask.
vbool32_t test_vmadc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vmadc_vv_u64m2_b32(op1, op2, vl);
}
1766
1767 //
1768 // CHECK-RV64-LABEL: @test_vmadc_vx_u64m2_b32(
1769 // CHECK-RV64-NEXT: entry:
1770 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
1771 // CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
1772 //
// Carry-out of vector + scalar (vmadc.vx, no carry-in), u64m2 -> vbool32_t mask.
vbool32_t test_vmadc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vmadc_vx_u64m2_b32(op1, op2, vl);
}
1776
//
// CHECK-RV64-LABEL: @test_vmadc_vvm_u64m4_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
                                   vbool16_t carryin, size_t vl) {
  // vmadc.vvm (vector + vector + carry-in mask) for u64m4 (nxv4i64 operands).
  return vmadc_vvm_u64m4_b16(op1, op2, carryin, vl);
}
1787
//
// CHECK-RV64-LABEL: @test_vmadc_vxm_u64m4_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2,
                                   vbool16_t carryin, size_t vl) {
  // vmadc.vxm (vector + scalar + carry-in mask) for u64m4.
  return vmadc_vxm_u64m4_b16(op1, op2, carryin, vl);
}
1798
//
// CHECK-RV64-LABEL: @test_vmadc_vv_u64m4_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
  // vmadc.vv (vector + vector, no carry-in) for u64m4.
  return vmadc_vv_u64m4_b16(op1, op2, vl);
}
1808
//
// CHECK-RV64-LABEL: @test_vmadc_vx_u64m4_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmadc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
  // vmadc.vx (vector + scalar, no carry-in) for u64m4.
  return vmadc_vx_u64m4_b16(op1, op2, vl);
}
1818
//
// CHECK-RV64-LABEL: @test_vmadc_vvm_u64m8_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2,
                                 vbool8_t carryin, size_t vl) {
  // vmadc.vvm (vector + vector + carry-in mask) for u64m8 (nxv8i64 operands).
  return vmadc_vvm_u64m8_b8(op1, op2, carryin, vl);
}
1829
//
// CHECK-RV64-LABEL: @test_vmadc_vxm_u64m8_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2,
                                 vbool8_t carryin, size_t vl) {
  // vmadc.vxm (vector + scalar + carry-in mask) for u64m8.
  return vmadc_vxm_u64m8_b8(op1, op2, carryin, vl);
}
1840
//
// CHECK-RV64-LABEL: @test_vmadc_vv_u64m8_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  // vmadc.vv (vector + vector, no carry-in) for u64m8.
  return vmadc_vv_u64m8_b8(op1, op2, vl);
}
1850
//
// CHECK-RV64-LABEL: @test_vmadc_vx_u64m8_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmadc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
  // vmadc.vx (vector + scalar, no carry-in) for u64m8.
  return vmadc_vx_u64m8_b8(op1, op2, vl);
}
1860