// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
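
// This file exercises the vsext_vf2, vsext_vf4, and vsext_vf8 sign-extension
// intrinsics across the supported SEW/LMUL combinations, in both unmasked and
// masked (_m) forms, and checks that each lowers to the corresponding
// llvm.riscv.vsext[.mask] intrinsic.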

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
  return vsext_vf2_i16mf4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
  return vsext_vf2_i16mf2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
  return vsext_vf2_i16m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
  return vsext_vf2_i16m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
  return vsext_vf2_i16m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
  return vsext_vf2_i16m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
  return vsext_vf4_i32mf2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
  return vsext_vf4_i32m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
  return vsext_vf4_i32m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
  return vsext_vf4_i32m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
  return vsext_vf4_i32m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
  return vsext_vf8_i64m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
  return vsext_vf8_i64m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
  return vsext_vf8_i64m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
  return vsext_vf8_i64m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
  return vsext_vf2_i32mf2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
  return vsext_vf2_i32m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
  return vsext_vf2_i32m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
  return vsext_vf2_i32m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
  return vsext_vf2_i32m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
  return vsext_vf4_i64m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
  return vsext_vf4_i64m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
  return vsext_vf4_i64m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
  return vsext_vf4_i64m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
  return vsext_vf2_i64m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
  return vsext_vf2_i64m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
  return vsext_vf2_i64m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
  return vsext_vf2_i64m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
                                    vint8mf8_t op1, size_t vl) {
  return vsext_vf2_i16mf4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
                                    vint8mf4_t op1, size_t vl) {
  return vsext_vf2_i16mf2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
                                  vint8mf2_t op1, size_t vl) {
  return vsext_vf2_i16m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
                                  vint8m1_t op1, size_t vl) {
  return vsext_vf2_i16m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
                                  vint8m2_t op1, size_t vl) {
  return vsext_vf2_i16m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
                                  vint8m4_t op1, size_t vl) {
  return vsext_vf2_i16m8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
                                    vint8mf8_t op1, size_t vl) {
  return vsext_vf4_i32mf2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
                                  vint8mf4_t op1, size_t vl) {
  return vsext_vf4_i32m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
                                  vint8mf2_t op1, size_t vl) {
  return vsext_vf4_i32m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
                                  vint8m1_t op1, size_t vl) {
  return vsext_vf4_i32m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
                                  vint8m2_t op1, size_t vl) {
  return vsext_vf4_i32m8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
                                  vint8mf8_t op1, size_t vl) {
  return vsext_vf8_i64m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
                                  vint8mf4_t op1, size_t vl) {
  return vsext_vf8_i64m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
                                  vint8mf2_t op1, size_t vl) {
  return vsext_vf8_i64m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
                                  vint8m1_t op1, size_t vl) {
  return vsext_vf8_i64m8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
                                    vint16mf4_t op1, size_t vl) {
  return vsext_vf2_i32mf2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
                                  vint16mf2_t op1, size_t vl) {
  return vsext_vf2_i32m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
                                  vint16m1_t op1, size_t vl) {
  return vsext_vf2_i32m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
                                  vint16m2_t op1, size_t vl) {
  return vsext_vf2_i32m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
                                  vint16m4_t op1, size_t vl) {
  return vsext_vf2_i32m8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
                                  vint16mf4_t op1, size_t vl) {
  return vsext_vf4_i64m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
                                  vint16mf2_t op1, size_t vl) {
  return vsext_vf4_i64m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
                                  vint16m1_t op1, size_t vl) {
  return vsext_vf4_i64m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
                                  vint16m2_t op1, size_t vl) {
  return vsext_vf4_i64m8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
                                  vint32mf2_t op1, size_t vl) {
  return vsext_vf2_i64m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
                                  vint32m1_t op1, size_t vl) {
  return vsext_vf2_i64m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
                                  vint32m2_t op1, size_t vl) {
  return vsext_vf2_i64m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
                                  vint32m4_t op1, size_t vl) {
  return vsext_vf2_i64m8_m(mask, maskedoff, op1, vl);
}