; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
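
; Test codegen for the llvm.riscv.vrgatherei16.vv intrinsic, in both unmasked
; and masked forms, across integer and floating-point element types at
; various LMULs.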
declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  i64);

define <vscale x 1 x i8> @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    i64 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  i64);

define <vscale x 2 x i8> @intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    i64 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i8> @intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  i64);

define <vscale x 4 x i8> @intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    i64 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i8> @intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  i64);

define <vscale x 8 x i8> @intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v25, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    i64 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i8> @intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  i64);

define <vscale x 16 x i8> @intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v26, v8, v12
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    i64 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i8> @intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  i64);

define <vscale x 32 x i8> @intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v28, v8, v16
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    i64 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x i8> @intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i64);

define <vscale x 1 x i16> @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i64 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i16> @intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i64);

define <vscale x 2 x i16> @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i64 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i16> @intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i64);

define <vscale x 4 x i16> @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i64 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i16> @intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i64);

define <vscale x 8 x i16> @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i64 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i16> @intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i64);

define <vscale x 16 x i16> @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i64 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i16> @intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i64);

define <vscale x 32 x i16> @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i64 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  i64);

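; With m8 data and m8 indices, the masked form takes three m8 vector operands;
; only two fit in the vector argument registers (v8-v15 and v16-v23 under the
; standard calling convention, an assumption stated here for context), so the
; index operand arrives by reference and is reloaded with vl8re16.v below.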
define <vscale x 32 x i16> @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  i64);

define <vscale x 1 x i32> @intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    i64 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i32> @intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  i64);

define <vscale x 4 x i32> @intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    i64 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i32> @intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  i64);

define <vscale x 8 x i32> @intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    i64 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i32> @intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  i64);

define <vscale x 16 x i32> @intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    i64 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i32> @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re16.v v28, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v16, v28, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i16>,
  i64);

define <vscale x 4 x i64> @intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i16> %1,
    i64 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i64> @intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i16>,
  i64);

define <vscale x 8 x i64> @intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i16> %1,
    i64 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i64> @intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl2re16.v v26, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v16, v26, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  i64);

define <vscale x 1 x half> @intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x i16> %1,
    i64 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x half> @intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x i16>,
  i64);

define <vscale x 2 x half> @intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x i16> %1,
    i64 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x half> @intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x i16>,
  i64);

define <vscale x 4 x half> @intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x i16> %1,
    i64 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x i16>,
  i64);

define <vscale x 8 x half> @intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x i16> %1,
    i64 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x half> @intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x i16>,
  i64);

define <vscale x 16 x half> @intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x i16> %1,
    i64 %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x half> @intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x i16>,
  i64);

define <vscale x 32 x half> @intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x i16> %1,
    i64 %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x half> @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x i16>,
  i64);

define <vscale x 1 x float> @intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x i16> %1,
    i64 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x float> @intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x i16>,
  i64);

define <vscale x 4 x float> @intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x i16> %1,
    i64 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x float> @intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x i16>,
  i64);

define <vscale x 8 x float> @intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x i16> %1,
    i64 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x float> @intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x i16>,
  i64);

define <vscale x 16 x float> @intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x i16> %1,
    i64 %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x float> @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re16.v v28, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v16, v28, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x float> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x i16>,
  i64);

define <vscale x 4 x double> @intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x i16> %1,
    i64 %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x double> @intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x i16>,
  i64);

define <vscale x 8 x double> @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x i16> %1,
    i64 %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x double> @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl2re16.v v26, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v16, v26, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x double> %a
}