// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

7 //
8 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
9 // CHECK-RV64-NEXT:  entry:
10 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
11 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
12 //
vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vdiv_vv_i8mf8(op1, op2, vl);
}
16 
17 //
18 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
19 // CHECK-RV64-NEXT:  entry:
20 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
21 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
22 //
vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8mf8(op1, op2, vl);
}
26 
27 //
28 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
29 // CHECK-RV64-NEXT:  entry:
30 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
31 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
32 //
vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vdiv_vv_i8mf4(op1, op2, vl);
}
36 
37 //
38 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
39 // CHECK-RV64-NEXT:  entry:
40 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
41 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
42 //
vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8mf4(op1, op2, vl);
}
46 
47 //
48 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
49 // CHECK-RV64-NEXT:  entry:
50 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
51 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
52 //
vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vdiv_vv_i8mf2(op1, op2, vl);
}
56 
57 //
58 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2(
59 // CHECK-RV64-NEXT:  entry:
60 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
61 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
62 //
vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8mf2(op1, op2, vl);
}
66 
67 //
68 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1(
69 // CHECK-RV64-NEXT:  entry:
70 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
71 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
72 //
vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vdiv_vv_i8m1(op1, op2, vl);
}
76 
77 //
78 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1(
79 // CHECK-RV64-NEXT:  entry:
80 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
81 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
82 //
vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8m1(op1, op2, vl);
}
86 
87 //
88 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2(
89 // CHECK-RV64-NEXT:  entry:
90 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
91 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
92 //
vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
  return vdiv_vv_i8m2(op1, op2, vl);
}
96 
97 //
98 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2(
99 // CHECK-RV64-NEXT:  entry:
100 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
101 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
102 //
vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8m2(op1, op2, vl);
}
106 
107 //
108 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4(
109 // CHECK-RV64-NEXT:  entry:
110 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
111 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
112 //
vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
  return vdiv_vv_i8m4(op1, op2, vl);
}
116 
117 //
118 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4(
119 // CHECK-RV64-NEXT:  entry:
120 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
121 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
122 //
vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8m4(op1, op2, vl);
}
126 
127 //
128 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8(
129 // CHECK-RV64-NEXT:  entry:
130 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
131 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
132 //
vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
  return vdiv_vv_i8m8(op1, op2, vl);
}
136 
137 //
138 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8(
139 // CHECK-RV64-NEXT:  entry:
140 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
141 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
142 //
vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8m8(op1, op2, vl);
}
146 
147 //
148 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4(
149 // CHECK-RV64-NEXT:  entry:
150 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
151 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
152 //
vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
  return vdiv_vv_i16mf4(op1, op2, vl);
}
156 
157 //
158 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4(
159 // CHECK-RV64-NEXT:  entry:
160 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
161 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
162 //
vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
  return vdiv_vx_i16mf4(op1, op2, vl);
}
166 
167 //
168 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2(
169 // CHECK-RV64-NEXT:  entry:
170 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
171 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
172 //
vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return vdiv_vv_i16mf2(op1, op2, vl);
}
176 
177 //
178 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2(
179 // CHECK-RV64-NEXT:  entry:
180 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
181 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
182 //
vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
  return vdiv_vx_i16mf2(op1, op2, vl);
}
186 
187 //
188 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1(
189 // CHECK-RV64-NEXT:  entry:
190 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
191 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
192 //
vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return vdiv_vv_i16m1(op1, op2, vl);
}
196 
197 //
198 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1(
199 // CHECK-RV64-NEXT:  entry:
200 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
201 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
202 //
vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
  return vdiv_vx_i16m1(op1, op2, vl);
}
206 
207 //
208 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2(
209 // CHECK-RV64-NEXT:  entry:
210 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
211 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
212 //
vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
  return vdiv_vv_i16m2(op1, op2, vl);
}
216 
217 //
218 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2(
219 // CHECK-RV64-NEXT:  entry:
220 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
221 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
222 //
vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
  return vdiv_vx_i16m2(op1, op2, vl);
}
226 
227 //
228 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4(
229 // CHECK-RV64-NEXT:  entry:
230 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
231 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
232 //
vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
  return vdiv_vv_i16m4(op1, op2, vl);
}
236 
237 //
238 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4(
239 // CHECK-RV64-NEXT:  entry:
240 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
241 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
242 //
vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
  return vdiv_vx_i16m4(op1, op2, vl);
}
246 
247 //
248 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8(
249 // CHECK-RV64-NEXT:  entry:
250 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
251 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
252 //
vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
  return vdiv_vv_i16m8(op1, op2, vl);
}
256 
257 //
258 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8(
259 // CHECK-RV64-NEXT:  entry:
260 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
261 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
262 //
vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
  return vdiv_vx_i16m8(op1, op2, vl);
}
266 
267 //
268 // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2(
269 // CHECK-RV64-NEXT:  entry:
270 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
271 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
272 //
vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
  return vdiv_vv_i32mf2(op1, op2, vl);
}
276 
277 //
278 // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2(
279 // CHECK-RV64-NEXT:  entry:
280 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
281 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
282 //
vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
  return vdiv_vx_i32mf2(op1, op2, vl);
}
286 
287 //
288 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1(
289 // CHECK-RV64-NEXT:  entry:
290 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
291 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
292 //
vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vdiv_vv_i32m1(op1, op2, vl);
}
296 
297 //
298 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1(
299 // CHECK-RV64-NEXT:  entry:
300 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
301 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
302 //
vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
  return vdiv_vx_i32m1(op1, op2, vl);
}
306 
307 //
308 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2(
309 // CHECK-RV64-NEXT:  entry:
310 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
311 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
312 //
vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
  return vdiv_vv_i32m2(op1, op2, vl);
}
316 
317 //
318 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2(
319 // CHECK-RV64-NEXT:  entry:
320 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
321 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
322 //
vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
  return vdiv_vx_i32m2(op1, op2, vl);
}
326 
327 //
328 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4(
329 // CHECK-RV64-NEXT:  entry:
330 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
331 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
332 //
vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
  return vdiv_vv_i32m4(op1, op2, vl);
}
336 
337 //
338 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4(
339 // CHECK-RV64-NEXT:  entry:
340 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
341 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
342 //
vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
  return vdiv_vx_i32m4(op1, op2, vl);
}
346 
347 //
348 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8(
349 // CHECK-RV64-NEXT:  entry:
350 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
351 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
352 //
vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
  return vdiv_vv_i32m8(op1, op2, vl);
}
356 
357 //
358 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8(
359 // CHECK-RV64-NEXT:  entry:
360 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
361 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
362 //
vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
  return vdiv_vx_i32m8(op1, op2, vl);
}
366 
367 //
368 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1(
369 // CHECK-RV64-NEXT:  entry:
370 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
371 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
372 //
vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vdiv_vv_i64m1(op1, op2, vl);
}
376 
377 //
378 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1(
379 // CHECK-RV64-NEXT:  entry:
380 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
381 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
382 //
vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
  return vdiv_vx_i64m1(op1, op2, vl);
}
386 
387 //
388 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2(
389 // CHECK-RV64-NEXT:  entry:
390 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
391 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
392 //
vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return vdiv_vv_i64m2(op1, op2, vl);
}
396 
397 //
398 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2(
399 // CHECK-RV64-NEXT:  entry:
400 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
401 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
402 //
vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
  return vdiv_vx_i64m2(op1, op2, vl);
}
406 
407 //
408 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4(
409 // CHECK-RV64-NEXT:  entry:
410 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
411 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
412 //
vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return vdiv_vv_i64m4(op1, op2, vl);
}
416 
417 //
418 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4(
419 // CHECK-RV64-NEXT:  entry:
420 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
421 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
422 //
vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
  return vdiv_vx_i64m4(op1, op2, vl);
}
426 
427 //
428 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8(
429 // CHECK-RV64-NEXT:  entry:
430 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
431 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
432 //
vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return vdiv_vv_i64m8(op1, op2, vl);
}
436 
437 //
438 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8(
439 // CHECK-RV64-NEXT:  entry:
440 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
441 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
442 //
vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
  return vdiv_vx_i64m8(op1, op2, vl);
}
446 
447 //
448 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8(
449 // CHECK-RV64-NEXT:  entry:
450 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
451 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
452 //
vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
  return vdivu_vv_u8mf8(op1, op2, vl);
}
456 
457 //
458 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8(
459 // CHECK-RV64-NEXT:  entry:
460 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
461 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
462 //
vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8mf8(op1, op2, vl);
}
466 
467 //
468 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4(
469 // CHECK-RV64-NEXT:  entry:
470 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
471 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
472 //
vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return vdivu_vv_u8mf4(op1, op2, vl);
}
476 
477 //
478 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4(
479 // CHECK-RV64-NEXT:  entry:
480 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
481 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
482 //
vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8mf4(op1, op2, vl);
}
486 
487 //
488 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2(
489 // CHECK-RV64-NEXT:  entry:
490 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
491 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
492 //
vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return vdivu_vv_u8mf2(op1, op2, vl);
}
496 
497 //
498 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2(
499 // CHECK-RV64-NEXT:  entry:
500 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
501 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
502 //
vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8mf2(op1, op2, vl);
}
506 
507 //
508 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1(
509 // CHECK-RV64-NEXT:  entry:
510 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
511 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
512 //
vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vdivu_vv_u8m1(op1, op2, vl);
}
516 
517 //
518 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1(
519 // CHECK-RV64-NEXT:  entry:
520 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
521 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
522 //
vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8m1(op1, op2, vl);
}
526 
527 //
528 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2(
529 // CHECK-RV64-NEXT:  entry:
530 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
531 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
532 //
vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vdivu_vv_u8m2(op1, op2, vl);
}
536 
537 //
538 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2(
539 // CHECK-RV64-NEXT:  entry:
540 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
541 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
542 //
vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8m2(op1, op2, vl);
}
546 
547 //
548 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4(
549 // CHECK-RV64-NEXT:  entry:
550 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
551 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
552 //
vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vdivu_vv_u8m4(op1, op2, vl);
}
556 
557 //
558 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4(
559 // CHECK-RV64-NEXT:  entry:
560 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
561 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
562 //
vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8m4(op1, op2, vl);
}
566 
567 //
568 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8(
569 // CHECK-RV64-NEXT:  entry:
570 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
571 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
572 //
vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return vdivu_vv_u8m8(op1, op2, vl);
}
576 
577 //
578 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8(
579 // CHECK-RV64-NEXT:  entry:
580 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
581 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
582 //
vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8m8(op1, op2, vl);
}
586 
587 //
588 // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4(
589 // CHECK-RV64-NEXT:  entry:
590 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
591 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
592 //
vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
  return vdivu_vv_u16mf4(op1, op2, vl);
}
596 
597 //
598 // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4(
599 // CHECK-RV64-NEXT:  entry:
600 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
601 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
602 //
vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
  return vdivu_vx_u16mf4(op1, op2, vl);
}
606 
607 //
608 // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2(
609 // CHECK-RV64-NEXT:  entry:
610 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
611 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
612 //
vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return vdivu_vv_u16mf2(op1, op2, vl);
}
616 
617 //
618 // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2(
619 // CHECK-RV64-NEXT:  entry:
620 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
621 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
622 //
vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return vdivu_vx_u16mf2(op1, op2, vl);
}
626 
627 //
628 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1(
629 // CHECK-RV64-NEXT:  entry:
630 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
631 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
632 //
vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vdivu_vv_u16m1(op1, op2, vl);
}
636 
637 //
638 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1(
639 // CHECK-RV64-NEXT:  entry:
640 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
641 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
642 //
vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
  return vdivu_vx_u16m1(op1, op2, vl);
}
646 
647 //
648 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2(
649 // CHECK-RV64-NEXT:  entry:
650 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
651 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
652 //
vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vdivu_vv_u16m2(op1, op2, vl);
}
656 
657 //
658 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2(
659 // CHECK-RV64-NEXT:  entry:
660 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
661 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
662 //
vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
  return vdivu_vx_u16m2(op1, op2, vl);
}
666 
667 //
668 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4(
669 // CHECK-RV64-NEXT:  entry:
670 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
671 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
672 //
vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return vdivu_vv_u16m4(op1, op2, vl);
}
676 
677 //
678 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4(
679 // CHECK-RV64-NEXT:  entry:
680 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
681 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
682 //
vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
  return vdivu_vx_u16m4(op1, op2, vl);
}
686 
687 //
688 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8(
689 // CHECK-RV64-NEXT:  entry:
690 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
691 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
692 //
vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return vdivu_vv_u16m8(op1, op2, vl);
}
696 
697 //
698 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8(
699 // CHECK-RV64-NEXT:  entry:
700 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
701 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
702 //
vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
  return vdivu_vx_u16m8(op1, op2, vl);
}
706 
707 //
708 // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2(
709 // CHECK-RV64-NEXT:  entry:
710 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
711 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
712 //
vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
  return vdivu_vv_u32mf2(op1, op2, vl);
}
716 
717 //
718 // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2(
719 // CHECK-RV64-NEXT:  entry:
720 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
721 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
722 //
vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
  return vdivu_vx_u32mf2(op1, op2, vl);
}
726 
727 //
728 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1(
729 // CHECK-RV64-NEXT:  entry:
730 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
731 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
732 //
vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vdivu_vv_u32m1(op1, op2, vl);
}
736 
737 //
738 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1(
739 // CHECK-RV64-NEXT:  entry:
740 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
741 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
742 //
vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vdivu_vx_u32m1(op1, op2, vl);
}
746 
747 //
748 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2(
749 // CHECK-RV64-NEXT:  entry:
750 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
751 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
752 //
vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return vdivu_vv_u32m2(op1, op2, vl);
}
756 
757 //
758 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2(
759 // CHECK-RV64-NEXT:  entry:
760 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
761 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
762 //
vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
  return vdivu_vx_u32m2(op1, op2, vl);
}
766 
767 //
768 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4(
769 // CHECK-RV64-NEXT:  entry:
770 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
771 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
772 //
vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return vdivu_vv_u32m4(op1, op2, vl);
}
776 
777 //
778 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4(
779 // CHECK-RV64-NEXT:  entry:
780 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
781 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
782 //
vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
  return vdivu_vx_u32m4(op1, op2, vl);
}
786 
787 //
788 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8(
789 // CHECK-RV64-NEXT:  entry:
790 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
791 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
792 //
vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return vdivu_vv_u32m8(op1, op2, vl);
}
796 
797 //
798 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8(
799 // CHECK-RV64-NEXT:  entry:
800 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
801 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
802 //
vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
  return vdivu_vx_u32m8(op1, op2, vl);
}
806 
807 //
808 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1(
809 // CHECK-RV64-NEXT:  entry:
810 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
811 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
812 //
vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vdivu_vv_u64m1(op1, op2, vl);
}
816 
817 //
818 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1(
819 // CHECK-RV64-NEXT:  entry:
820 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
821 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
822 //
vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vdivu_vx_u64m1(op1, op2, vl);
}
826 
827 //
828 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2(
829 // CHECK-RV64-NEXT:  entry:
830 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
831 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
832 //
vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vdivu_vv_u64m2(op1, op2, vl);
}
836 
837 //
838 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2(
839 // CHECK-RV64-NEXT:  entry:
840 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
841 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
842 //
vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vdivu_vx_u64m2(op1, op2, vl);
}
846 
847 //
848 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4(
849 // CHECK-RV64-NEXT:  entry:
850 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
851 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
852 //
vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vdivu_vv_u64m4(op1, op2, vl);
}
856 
857 //
858 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4(
859 // CHECK-RV64-NEXT:  entry:
860 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
861 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
862 //
vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vdivu_vx_u64m4(op1, op2, vl);
}
866 
867 //
868 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8(
869 // CHECK-RV64-NEXT:  entry:
870 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
871 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
872 //
vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vdivu_vv_u64m8(op1, op2, vl);
}
876 
877 //
878 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8(
879 // CHECK-RV64-NEXT:  entry:
880 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
881 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
882 //
vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vdivu_vx_u64m8(op1, op2, vl);
}
886 
887 //
888 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m(
889 // CHECK-RV64-NEXT:  entry:
890 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
891 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
892 //
vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vdiv_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
}
896 
897 //
898 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m(
899 // CHECK-RV64-NEXT:  entry:
900 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
901 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
902 //
vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
}
906 
907 //
908 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m(
909 // CHECK-RV64-NEXT:  entry:
910 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
911 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
912 //
vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vdiv_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
}
916 
917 //
918 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m(
919 // CHECK-RV64-NEXT:  entry:
920 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
921 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
922 //
vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
}
926 
927 //
928 // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m(
929 // CHECK-RV64-NEXT:  entry:
930 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
931 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
932 //
vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vdiv_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
}
936 
937 //
938 // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m(
939 // CHECK-RV64-NEXT:  entry:
940 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
941 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
942 //
vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
}
946 
947 //
948 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m(
949 // CHECK-RV64-NEXT:  entry:
950 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
951 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
952 //
vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vdiv_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
}
956 
957 //
958 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m(
959 // CHECK-RV64-NEXT:  entry:
960 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
961 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
962 //
vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
}
966 
967 //
968 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m(
969 // CHECK-RV64-NEXT:  entry:
970 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
971 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
972 //
vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
  return vdiv_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
}
976 
977 //
978 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m(
979 // CHECK-RV64-NEXT:  entry:
980 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
981 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
982 //
vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
}
986 
987 //
988 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m(
989 // CHECK-RV64-NEXT:  entry:
990 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
991 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
992 //
vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
  return vdiv_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
}
996 
997 //
998 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m(
999 // CHECK-RV64-NEXT:  entry:
1000 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1001 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1002 //
vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
}
1006 
1007 //
1008 // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m(
1009 // CHECK-RV64-NEXT:  entry:
1010 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1011 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1012 //
vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
  return vdiv_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
}
1016 
1017 //
1018 // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m(
1019 // CHECK-RV64-NEXT:  entry:
1020 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1021 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1022 //
vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
  return vdiv_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
}
1026 
1027 //
1028 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m(
1029 // CHECK-RV64-NEXT:  entry:
1030 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1031 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1032 //
vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
  return vdiv_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
}
1036 
1037 //
1038 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m(
1039 // CHECK-RV64-NEXT:  entry:
1040 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1041 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1042 //
vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
  return vdiv_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
}
1046 
1047 //
1048 // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m(
1049 // CHECK-RV64-NEXT:  entry:
1050 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1051 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1052 //
vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return vdiv_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
}
1056 
1057 //
1058 // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m(
1059 // CHECK-RV64-NEXT:  entry:
1060 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1061 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1062 //
vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
  return vdiv_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
}
1066 
1067 //
1068 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m(
1069 // CHECK-RV64-NEXT:  entry:
1070 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1071 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1072 //
vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return vdiv_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
}
1076 
1077 //
1078 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m(
1079 // CHECK-RV64-NEXT:  entry:
1080 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1081 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1082 //
vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
  return vdiv_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
}
1086 
1087 //
1088 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m(
1089 // CHECK-RV64-NEXT:  entry:
1090 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1091 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1092 //
vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
  return vdiv_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
}
1096 
1097 //
1098 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m(
1099 // CHECK-RV64-NEXT:  entry:
1100 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1101 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1102 //
vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
  return vdiv_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
}
1106 
1107 //
1108 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m(
1109 // CHECK-RV64-NEXT:  entry:
1110 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1111 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1112 //
vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
  return vdiv_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
}
1116 
1117 //
1118 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m(
1119 // CHECK-RV64-NEXT:  entry:
1120 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1121 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1122 //
vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
  return vdiv_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
}
1126 
1127 //
1128 // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m(
1129 // CHECK-RV64-NEXT:  entry:
1130 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1131 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1132 //
vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
  return vdiv_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
}
1136 
1137 //
1138 // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m(
1139 // CHECK-RV64-NEXT:  entry:
1140 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1141 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1142 //
vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
  return vdiv_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
}
1146 
1147 //
1148 // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m(
1149 // CHECK-RV64-NEXT:  entry:
1150 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1151 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1152 //
vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
  return vdiv_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
}
1156 
1157 //
1158 // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m(
1159 // CHECK-RV64-NEXT:  entry:
1160 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1161 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1162 //
vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
  return vdiv_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
}
1166 
1167 //
1168 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m(
1169 // CHECK-RV64-NEXT:  entry:
1170 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1171 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1172 //
vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vdiv_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
}
1176 
1177 //
1178 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m(
1179 // CHECK-RV64-NEXT:  entry:
1180 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1181 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1182 //
vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
  return vdiv_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
}
1186 
1187 //
1188 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m(
1189 // CHECK-RV64-NEXT:  entry:
1190 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1191 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1192 //
vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
  return vdiv_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
}
1196 
1197 //
1198 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m(
1199 // CHECK-RV64-NEXT:  entry:
1200 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1201 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1202 //
vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
  return vdiv_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
}
1206 
1207 //
1208 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m(
1209 // CHECK-RV64-NEXT:  entry:
1210 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1211 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1212 //
vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
  return vdiv_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
}
1216 
1217 //
1218 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m(
1219 // CHECK-RV64-NEXT:  entry:
1220 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1221 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1222 //
vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
  return vdiv_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
}
1226 
1227 //
1228 // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m(
1229 // CHECK-RV64-NEXT:  entry:
1230 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1231 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1232 //
vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
  return vdiv_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
}
1236 
1237 //
1238 // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m(
1239 // CHECK-RV64-NEXT:  entry:
1240 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1241 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1242 //
vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
  return vdiv_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
}
1246 
1247 //
1248 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m(
1249 // CHECK-RV64-NEXT:  entry:
1250 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1251 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1252 //
vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vdiv_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
}
1256 
1257 //
1258 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m(
1259 // CHECK-RV64-NEXT:  entry:
1260 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1261 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1262 //
vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
  return vdiv_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
}
1266 
1267 //
1268 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m(
1269 // CHECK-RV64-NEXT:  entry:
1270 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1271 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1272 //
vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return vdiv_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
}
1276 
1277 //
1278 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m(
1279 // CHECK-RV64-NEXT:  entry:
1280 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1281 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1282 //
vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
  return vdiv_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
}
1286 
1287 //
1288 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m(
1289 // CHECK-RV64-NEXT:  entry:
1290 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1291 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1292 //
vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return vdiv_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
}
1296 
1297 //
1298 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m(
1299 // CHECK-RV64-NEXT:  entry:
1300 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1301 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1302 //
vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
  return vdiv_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
}
1306 
1307 //
1308 // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m(
1309 // CHECK-RV64-NEXT:  entry:
1310 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1311 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1312 //
vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return vdiv_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
}
1316 
1317 //
1318 // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m(
1319 // CHECK-RV64-NEXT:  entry:
1320 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1321 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1322 //
vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
  return vdiv_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
}
1326 
1327 //
1328 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m(
1329 // CHECK-RV64-NEXT:  entry:
1330 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1331 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1332 //
vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
  return vdivu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
}
1336 
1337 //
1338 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m(
1339 // CHECK-RV64-NEXT:  entry:
1340 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1341 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
1342 //
vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
}
1346 
1347 //
1348 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m(
1349 // CHECK-RV64-NEXT:  entry:
1350 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1351 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1352 //
vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return vdivu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
}
1356 
1357 //
1358 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m(
1359 // CHECK-RV64-NEXT:  entry:
1360 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1361 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
1362 //
vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
}
1366 
1367 //
1368 // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m(
1369 // CHECK-RV64-NEXT:  entry:
1370 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1371 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1372 //
vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return vdivu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
}
1376 
1377 //
1378 // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m(
1379 // CHECK-RV64-NEXT:  entry:
1380 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1381 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
1382 //
vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
}
1386 
1387 //
1388 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m(
1389 // CHECK-RV64-NEXT:  entry:
1390 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1391 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1392 //
vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vdivu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
}
1396 
1397 //
1398 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m(
1399 // CHECK-RV64-NEXT:  entry:
1400 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1401 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
1402 //
vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
}
1406 
1407 //
1408 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m(
1409 // CHECK-RV64-NEXT:  entry:
1410 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1411 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1412 //
vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vdivu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
}
1416 
1417 //
1418 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m(
1419 // CHECK-RV64-NEXT:  entry:
1420 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1421 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
1422 //
vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
}
1426 
1427 //
1428 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m(
1429 // CHECK-RV64-NEXT:  entry:
1430 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1431 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1432 //
vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vdivu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
}
1436 
1437 //
1438 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m(
1439 // CHECK-RV64-NEXT:  entry:
1440 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1441 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
1442 //
vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
}
1446 
1447 //
1448 // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m(
1449 // CHECK-RV64-NEXT:  entry:
1450 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1451 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1452 //
vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return vdivu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
}
1456 
1457 //
1458 // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m(
1459 // CHECK-RV64-NEXT:  entry:
1460 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1461 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
1462 //
vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
  return vdivu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
}
1466 
1467 //
1468 // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m(
1469 // CHECK-RV64-NEXT:  entry:
1470 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1471 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1472 //
vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
  return vdivu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
}
1476 
1477 //
1478 // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m(
1479 // CHECK-RV64-NEXT:  entry:
1480 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1481 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
1482 //
vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
  return vdivu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
}
1486 
1487 //
1488 // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m(
1489 // CHECK-RV64-NEXT:  entry:
1490 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1491 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1492 //
vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return vdivu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
}
1496 
1497 //
1498 // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m(
1499 // CHECK-RV64-NEXT:  entry:
1500 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1501 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
1502 //
vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return vdivu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
}
1506 
1507 //
1508 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m(
1509 // CHECK-RV64-NEXT:  entry:
1510 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1511 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1512 //
vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vdivu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
}
1516 
1517 //
1518 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m(
1519 // CHECK-RV64-NEXT:  entry:
1520 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1521 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
1522 //
vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
  return vdivu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
}
1526 
1527 //
1528 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m(
1529 // CHECK-RV64-NEXT:  entry:
1530 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1531 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1532 //
vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vdivu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
}
1536 
1537 //
1538 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m(
1539 // CHECK-RV64-NEXT:  entry:
1540 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1541 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
1542 //
vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
  return vdivu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
}
1546 
1547 //
1548 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m(
1549 // CHECK-RV64-NEXT:  entry:
1550 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1551 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1552 //
vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return vdivu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
}
1556 
1557 //
1558 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m(
1559 // CHECK-RV64-NEXT:  entry:
1560 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1561 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
1562 //
vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
  return vdivu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
}
1566 
1567 //
1568 // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m(
1569 // CHECK-RV64-NEXT:  entry:
1570 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1571 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1572 //
vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return vdivu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
}
1576 
1577 //
1578 // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m(
1579 // CHECK-RV64-NEXT:  entry:
1580 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1581 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
1582 //
vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
  return vdivu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
}
1586 
1587 //
1588 // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m(
1589 // CHECK-RV64-NEXT:  entry:
1590 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1591 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1592 //
vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
  return vdivu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
}
1596 
1597 //
1598 // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m(
1599 // CHECK-RV64-NEXT:  entry:
1600 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1601 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
1602 //
vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
  return vdivu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
}
1606 
1607 //
1608 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m(
1609 // CHECK-RV64-NEXT:  entry:
1610 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1611 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1612 //
vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vdivu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
}
1616 
1617 //
1618 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m(
1619 // CHECK-RV64-NEXT:  entry:
1620 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1621 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
1622 //
vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vdivu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
}
1626 
1627 //
1628 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m(
1629 // CHECK-RV64-NEXT:  entry:
1630 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1631 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1632 //
vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return vdivu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
}
1636 
1637 //
1638 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m(
1639 // CHECK-RV64-NEXT:  entry:
1640 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1641 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1642 //
vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
  return vdivu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
}
1646 
1647 //
1648 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m(
1649 // CHECK-RV64-NEXT:  entry:
1650 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1651 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1652 //
vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return vdivu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
}
1656 
1657 //
1658 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m(
1659 // CHECK-RV64-NEXT:  entry:
1660 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1661 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1662 //
vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
  return vdivu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
}
1666 
1667 //
1668 // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m(
1669 // CHECK-RV64-NEXT:  entry:
1670 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1671 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1672 //
vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return vdivu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
}
1676 
1677 //
1678 // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m(
1679 // CHECK-RV64-NEXT:  entry:
1680 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1681 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1682 //
vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
  return vdivu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
}
1686 
1687 //
1688 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m(
1689 // CHECK-RV64-NEXT:  entry:
1690 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1691 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1692 //
vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vdivu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
}
1696 
1697 //
1698 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m(
1699 // CHECK-RV64-NEXT:  entry:
1700 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1701 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1702 //
vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vdivu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
}
1706 
1707 //
1708 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m(
1709 // CHECK-RV64-NEXT:  entry:
1710 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1711 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1712 //
vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vdivu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
}
1716 
1717 //
1718 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m(
1719 // CHECK-RV64-NEXT:  entry:
1720 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1721 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1722 //
vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vdivu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
}
1726 
1727 //
1728 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m(
1729 // CHECK-RV64-NEXT:  entry:
1730 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1731 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1732 //
vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vdivu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
}
1736 
1737 //
1738 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m(
1739 // CHECK-RV64-NEXT:  entry:
1740 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1741 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1742 //
vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vdivu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
}
1746 
1747 //
1748 // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m(
1749 // CHECK-RV64-NEXT:  entry:
1750 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1751 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1752 //
vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vdivu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
}
1756 
1757 //
1758 // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m(
1759 // CHECK-RV64-NEXT:  entry:
1760 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1761 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1762 //
vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vdivu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
}