// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
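
// Note: vid.v writes each element's index (0, 1, 2, ...) into the
// destination, so every unmasked intrinsic below is expected to lower to a
// single @llvm.riscv.vid.* call whose only operand is the vector length vl.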

//
// CHECK-RV64-LABEL: @test_vid_v_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vid_v_u8mf8(size_t vl) { return vid_v_u8mf8(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vid_v_u8mf4(size_t vl) { return vid_v_u8mf4(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vid_v_u8mf2(size_t vl) { return vid_v_u8mf2(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vid_v_u8m1(size_t vl) { return vid_v_u8m1(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vid_v_u8m2(size_t vl) { return vid_v_u8m2(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vid_v_u8m4(size_t vl) { return vid_v_u8m4(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.nxv64i8.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vid_v_u8m8(size_t vl) { return vid_v_u8m8(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vid_v_u16mf4(size_t vl) { return vid_v_u16mf4(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vid_v_u16mf2(size_t vl) { return vid_v_u16mf2(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vid_v_u16m1(size_t vl) { return vid_v_u16m1(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vid_v_u16m2(size_t vl) { return vid_v_u16m2(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vid_v_u16m4(size_t vl) { return vid_v_u16m4(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vid_v_u16m8(size_t vl) { return vid_v_u16m8(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vid_v_u32mf2(size_t vl) { return vid_v_u32mf2(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vid_v_u32m1(size_t vl) { return vid_v_u32m1(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vid_v_u32m2(size_t vl) { return vid_v_u32m2(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vid_v_u32m4(size_t vl) { return vid_v_u32m4(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vid_v_u32m8(size_t vl) { return vid_v_u32m8(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vid_v_u64m1(size_t vl) { return vid_v_u64m1(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vid_v_u64m2(size_t vl) { return vid_v_u64m2(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vid_v_u64m4(size_t vl) { return vid_v_u64m4(vl); }

//
// CHECK-RV64-LABEL: @test_vid_v_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64.i64(i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vid_v_u64m8(size_t vl) { return vid_v_u64m8(vl); }

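// Note: the _m (masked) variants additionally take a mask and a maskedoff
// operand; inactive elements keep their value from maskedoff, so each test
// below expects a single @llvm.riscv.vid.mask.* call taking maskedoff, the
// mask, and vl.
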
//
// CHECK-RV64-LABEL: @test_vid_v_u8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
                               size_t vl) {
  return vid_v_u8mf8_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
                               size_t vl) {
  return vid_v_u8mf4_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
                               size_t vl) {
  return vid_v_u8mf2_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) {
  return vid_v_u8m1_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) {
  return vid_v_u8m2_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) {
  return vid_v_u8m4_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) {
  return vid_v_u8m8_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
                                 size_t vl) {
  return vid_v_u16mf4_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
                                 size_t vl) {
  return vid_v_u16mf2_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
                               size_t vl) {
  return vid_v_u16m1_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
                               size_t vl) {
  return vid_v_u16m2_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
                               size_t vl) {
  return vid_v_u16m4_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
                               size_t vl) {
  return vid_v_u16m8_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
                                 size_t vl) {
  return vid_v_u32mf2_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
                               size_t vl) {
  return vid_v_u32m1_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
                               size_t vl) {
  return vid_v_u32m2_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
                               size_t vl) {
  return vid_v_u32m4_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
                               size_t vl) {
  return vid_v_u32m8_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
                               size_t vl) {
  return vid_v_u64m1_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
                               size_t vl) {
  return vid_v_u64m2_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
                               size_t vl) {
  return vid_v_u64m4_m(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                               size_t vl) {
  return vid_v_u64m8_m(mask, maskedoff, vl);
}