1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
3; RUN:   < %s | FileCheck %s
4declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
5  <vscale x 1 x i8>*,
6  <vscale x 1 x i64>,
7  i32);
8
9define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
10; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64:
11; CHECK:       # %bb.0: # %entry
12; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
13; CHECK-NEXT:    vluxei64.v v25, (a0), v8
14; CHECK-NEXT:    vmv1r.v v8, v25
15; CHECK-NEXT:    ret
16entry:
17  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
18    <vscale x 1 x i8>* %0,
19    <vscale x 1 x i64> %1,
20    i32 %2)
21
22  ret <vscale x 1 x i8> %a
23}
24
25declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
26  <vscale x 1 x i8>,
27  <vscale x 1 x i8>*,
28  <vscale x 1 x i64>,
29  <vscale x 1 x i1>,
30  i32);
31
32define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
33; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
34; CHECK:       # %bb.0: # %entry
35; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
36; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
37; CHECK-NEXT:    ret
38entry:
39  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
40    <vscale x 1 x i8> %0,
41    <vscale x 1 x i8>* %1,
42    <vscale x 1 x i64> %2,
43    <vscale x 1 x i1> %3,
44    i32 %4)
45
46  ret <vscale x 1 x i8> %a
47}
48
49declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
50  <vscale x 2 x i8>*,
51  <vscale x 2 x i64>,
52  i32);
53
54define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
55; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64:
56; CHECK:       # %bb.0: # %entry
57; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
58; CHECK-NEXT:    vluxei64.v v25, (a0), v8
59; CHECK-NEXT:    vmv1r.v v8, v25
60; CHECK-NEXT:    ret
61entry:
62  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
63    <vscale x 2 x i8>* %0,
64    <vscale x 2 x i64> %1,
65    i32 %2)
66
67  ret <vscale x 2 x i8> %a
68}
69
70declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
71  <vscale x 2 x i8>,
72  <vscale x 2 x i8>*,
73  <vscale x 2 x i64>,
74  <vscale x 2 x i1>,
75  i32);
76
77define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
78; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
79; CHECK:       # %bb.0: # %entry
80; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
81; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
82; CHECK-NEXT:    ret
83entry:
84  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
85    <vscale x 2 x i8> %0,
86    <vscale x 2 x i8>* %1,
87    <vscale x 2 x i64> %2,
88    <vscale x 2 x i1> %3,
89    i32 %4)
90
91  ret <vscale x 2 x i8> %a
92}
93
94declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
95  <vscale x 4 x i8>*,
96  <vscale x 4 x i64>,
97  i32);
98
99define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
100; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64:
101; CHECK:       # %bb.0: # %entry
102; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
103; CHECK-NEXT:    vluxei64.v v25, (a0), v8
104; CHECK-NEXT:    vmv1r.v v8, v25
105; CHECK-NEXT:    ret
106entry:
107  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
108    <vscale x 4 x i8>* %0,
109    <vscale x 4 x i64> %1,
110    i32 %2)
111
112  ret <vscale x 4 x i8> %a
113}
114
115declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
116  <vscale x 4 x i8>,
117  <vscale x 4 x i8>*,
118  <vscale x 4 x i64>,
119  <vscale x 4 x i1>,
120  i32);
121
122define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
123; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
124; CHECK:       # %bb.0: # %entry
125; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
126; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
127; CHECK-NEXT:    ret
128entry:
129  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
130    <vscale x 4 x i8> %0,
131    <vscale x 4 x i8>* %1,
132    <vscale x 4 x i64> %2,
133    <vscale x 4 x i1> %3,
134    i32 %4)
135
136  ret <vscale x 4 x i8> %a
137}
138
139declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
140  <vscale x 8 x i8>*,
141  <vscale x 8 x i64>,
142  i32);
143
144define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
145; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64:
146; CHECK:       # %bb.0: # %entry
147; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
148; CHECK-NEXT:    vluxei64.v v25, (a0), v8
149; CHECK-NEXT:    vmv1r.v v8, v25
150; CHECK-NEXT:    ret
151entry:
152  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
153    <vscale x 8 x i8>* %0,
154    <vscale x 8 x i64> %1,
155    i32 %2)
156
157  ret <vscale x 8 x i8> %a
158}
159
160declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
161  <vscale x 8 x i8>,
162  <vscale x 8 x i8>*,
163  <vscale x 8 x i64>,
164  <vscale x 8 x i1>,
165  i32);
166
167define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
168; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
169; CHECK:       # %bb.0: # %entry
170; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
171; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
172; CHECK-NEXT:    ret
173entry:
174  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
175    <vscale x 8 x i8> %0,
176    <vscale x 8 x i8>* %1,
177    <vscale x 8 x i64> %2,
178    <vscale x 8 x i1> %3,
179    i32 %4)
180
181  ret <vscale x 8 x i8> %a
182}
183
184declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
185  <vscale x 1 x i16>*,
186  <vscale x 1 x i64>,
187  i32);
188
189define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
190; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64:
191; CHECK:       # %bb.0: # %entry
192; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
193; CHECK-NEXT:    vluxei64.v v25, (a0), v8
194; CHECK-NEXT:    vmv1r.v v8, v25
195; CHECK-NEXT:    ret
196entry:
197  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
198    <vscale x 1 x i16>* %0,
199    <vscale x 1 x i64> %1,
200    i32 %2)
201
202  ret <vscale x 1 x i16> %a
203}
204
205declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
206  <vscale x 1 x i16>,
207  <vscale x 1 x i16>*,
208  <vscale x 1 x i64>,
209  <vscale x 1 x i1>,
210  i32);
211
212define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
213; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
214; CHECK:       # %bb.0: # %entry
215; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
216; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
217; CHECK-NEXT:    ret
218entry:
219  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
220    <vscale x 1 x i16> %0,
221    <vscale x 1 x i16>* %1,
222    <vscale x 1 x i64> %2,
223    <vscale x 1 x i1> %3,
224    i32 %4)
225
226  ret <vscale x 1 x i16> %a
227}
228
229declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
230  <vscale x 2 x i16>*,
231  <vscale x 2 x i64>,
232  i32);
233
234define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
235; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64:
236; CHECK:       # %bb.0: # %entry
237; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
238; CHECK-NEXT:    vluxei64.v v25, (a0), v8
239; CHECK-NEXT:    vmv1r.v v8, v25
240; CHECK-NEXT:    ret
241entry:
242  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
243    <vscale x 2 x i16>* %0,
244    <vscale x 2 x i64> %1,
245    i32 %2)
246
247  ret <vscale x 2 x i16> %a
248}
249
250declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
251  <vscale x 2 x i16>,
252  <vscale x 2 x i16>*,
253  <vscale x 2 x i64>,
254  <vscale x 2 x i1>,
255  i32);
256
257define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
258; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
259; CHECK:       # %bb.0: # %entry
260; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
261; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
262; CHECK-NEXT:    ret
263entry:
264  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
265    <vscale x 2 x i16> %0,
266    <vscale x 2 x i16>* %1,
267    <vscale x 2 x i64> %2,
268    <vscale x 2 x i1> %3,
269    i32 %4)
270
271  ret <vscale x 2 x i16> %a
272}
273
274declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
275  <vscale x 4 x i16>*,
276  <vscale x 4 x i64>,
277  i32);
278
279define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
280; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64:
281; CHECK:       # %bb.0: # %entry
282; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
283; CHECK-NEXT:    vluxei64.v v25, (a0), v8
284; CHECK-NEXT:    vmv1r.v v8, v25
285; CHECK-NEXT:    ret
286entry:
287  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
288    <vscale x 4 x i16>* %0,
289    <vscale x 4 x i64> %1,
290    i32 %2)
291
292  ret <vscale x 4 x i16> %a
293}
294
295declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
296  <vscale x 4 x i16>,
297  <vscale x 4 x i16>*,
298  <vscale x 4 x i64>,
299  <vscale x 4 x i1>,
300  i32);
301
302define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
303; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
304; CHECK:       # %bb.0: # %entry
305; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
306; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
307; CHECK-NEXT:    ret
308entry:
309  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
310    <vscale x 4 x i16> %0,
311    <vscale x 4 x i16>* %1,
312    <vscale x 4 x i64> %2,
313    <vscale x 4 x i1> %3,
314    i32 %4)
315
316  ret <vscale x 4 x i16> %a
317}
318
319declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
320  <vscale x 8 x i16>*,
321  <vscale x 8 x i64>,
322  i32);
323
324define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
325; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64:
326; CHECK:       # %bb.0: # %entry
327; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
328; CHECK-NEXT:    vluxei64.v v26, (a0), v8
329; CHECK-NEXT:    vmv2r.v v8, v26
330; CHECK-NEXT:    ret
331entry:
332  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
333    <vscale x 8 x i16>* %0,
334    <vscale x 8 x i64> %1,
335    i32 %2)
336
337  ret <vscale x 8 x i16> %a
338}
339
340declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
341  <vscale x 8 x i16>,
342  <vscale x 8 x i16>*,
343  <vscale x 8 x i64>,
344  <vscale x 8 x i1>,
345  i32);
346
347define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
348; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
349; CHECK:       # %bb.0: # %entry
350; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
351; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
352; CHECK-NEXT:    ret
353entry:
354  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
355    <vscale x 8 x i16> %0,
356    <vscale x 8 x i16>* %1,
357    <vscale x 8 x i64> %2,
358    <vscale x 8 x i1> %3,
359    i32 %4)
360
361  ret <vscale x 8 x i16> %a
362}
363
364declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
365  <vscale x 1 x i32>*,
366  <vscale x 1 x i64>,
367  i32);
368
369define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
370; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64:
371; CHECK:       # %bb.0: # %entry
372; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
373; CHECK-NEXT:    vluxei64.v v25, (a0), v8
374; CHECK-NEXT:    vmv1r.v v8, v25
375; CHECK-NEXT:    ret
376entry:
377  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
378    <vscale x 1 x i32>* %0,
379    <vscale x 1 x i64> %1,
380    i32 %2)
381
382  ret <vscale x 1 x i32> %a
383}
384
385declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
386  <vscale x 1 x i32>,
387  <vscale x 1 x i32>*,
388  <vscale x 1 x i64>,
389  <vscale x 1 x i1>,
390  i32);
391
392define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
393; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
394; CHECK:       # %bb.0: # %entry
395; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
396; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
397; CHECK-NEXT:    ret
398entry:
399  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
400    <vscale x 1 x i32> %0,
401    <vscale x 1 x i32>* %1,
402    <vscale x 1 x i64> %2,
403    <vscale x 1 x i1> %3,
404    i32 %4)
405
406  ret <vscale x 1 x i32> %a
407}
408
409declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
410  <vscale x 2 x i32>*,
411  <vscale x 2 x i64>,
412  i32);
413
414define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
415; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64:
416; CHECK:       # %bb.0: # %entry
417; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
418; CHECK-NEXT:    vluxei64.v v25, (a0), v8
419; CHECK-NEXT:    vmv1r.v v8, v25
420; CHECK-NEXT:    ret
421entry:
422  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
423    <vscale x 2 x i32>* %0,
424    <vscale x 2 x i64> %1,
425    i32 %2)
426
427  ret <vscale x 2 x i32> %a
428}
429
430declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
431  <vscale x 2 x i32>,
432  <vscale x 2 x i32>*,
433  <vscale x 2 x i64>,
434  <vscale x 2 x i1>,
435  i32);
436
437define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
438; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
439; CHECK:       # %bb.0: # %entry
440; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
441; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
442; CHECK-NEXT:    ret
443entry:
444  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
445    <vscale x 2 x i32> %0,
446    <vscale x 2 x i32>* %1,
447    <vscale x 2 x i64> %2,
448    <vscale x 2 x i1> %3,
449    i32 %4)
450
451  ret <vscale x 2 x i32> %a
452}
453
454declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
455  <vscale x 4 x i32>*,
456  <vscale x 4 x i64>,
457  i32);
458
459define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
460; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64:
461; CHECK:       # %bb.0: # %entry
462; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
463; CHECK-NEXT:    vluxei64.v v26, (a0), v8
464; CHECK-NEXT:    vmv2r.v v8, v26
465; CHECK-NEXT:    ret
466entry:
467  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
468    <vscale x 4 x i32>* %0,
469    <vscale x 4 x i64> %1,
470    i32 %2)
471
472  ret <vscale x 4 x i32> %a
473}
474
475declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
476  <vscale x 4 x i32>,
477  <vscale x 4 x i32>*,
478  <vscale x 4 x i64>,
479  <vscale x 4 x i1>,
480  i32);
481
482define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
483; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
484; CHECK:       # %bb.0: # %entry
485; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
486; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
487; CHECK-NEXT:    ret
488entry:
489  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
490    <vscale x 4 x i32> %0,
491    <vscale x 4 x i32>* %1,
492    <vscale x 4 x i64> %2,
493    <vscale x 4 x i1> %3,
494    i32 %4)
495
496  ret <vscale x 4 x i32> %a
497}
498
499declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
500  <vscale x 8 x i32>*,
501  <vscale x 8 x i64>,
502  i32);
503
504define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
505; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64:
506; CHECK:       # %bb.0: # %entry
507; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
508; CHECK-NEXT:    vluxei64.v v28, (a0), v8
509; CHECK-NEXT:    vmv4r.v v8, v28
510; CHECK-NEXT:    ret
511entry:
512  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
513    <vscale x 8 x i32>* %0,
514    <vscale x 8 x i64> %1,
515    i32 %2)
516
517  ret <vscale x 8 x i32> %a
518}
519
520declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
521  <vscale x 8 x i32>,
522  <vscale x 8 x i32>*,
523  <vscale x 8 x i64>,
524  <vscale x 8 x i1>,
525  i32);
526
527define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
528; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
529; CHECK:       # %bb.0: # %entry
530; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
531; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
532; CHECK-NEXT:    ret
533entry:
534  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
535    <vscale x 8 x i32> %0,
536    <vscale x 8 x i32>* %1,
537    <vscale x 8 x i64> %2,
538    <vscale x 8 x i1> %3,
539    i32 %4)
540
541  ret <vscale x 8 x i32> %a
542}
543
544declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
545  <vscale x 1 x i64>*,
546  <vscale x 1 x i64>,
547  i32);
548
549define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
550; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64:
551; CHECK:       # %bb.0: # %entry
552; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
553; CHECK-NEXT:    vluxei64.v v8, (a0), v8
554; CHECK-NEXT:    ret
555entry:
556  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
557    <vscale x 1 x i64>* %0,
558    <vscale x 1 x i64> %1,
559    i32 %2)
560
561  ret <vscale x 1 x i64> %a
562}
563
564declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
565  <vscale x 1 x i64>,
566  <vscale x 1 x i64>*,
567  <vscale x 1 x i64>,
568  <vscale x 1 x i1>,
569  i32);
570
571define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
572; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
573; CHECK:       # %bb.0: # %entry
574; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
575; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
576; CHECK-NEXT:    ret
577entry:
578  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
579    <vscale x 1 x i64> %0,
580    <vscale x 1 x i64>* %1,
581    <vscale x 1 x i64> %2,
582    <vscale x 1 x i1> %3,
583    i32 %4)
584
585  ret <vscale x 1 x i64> %a
586}
587
588declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
589  <vscale x 2 x i64>*,
590  <vscale x 2 x i64>,
591  i32);
592
593define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
594; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64:
595; CHECK:       # %bb.0: # %entry
596; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
597; CHECK-NEXT:    vluxei64.v v8, (a0), v8
598; CHECK-NEXT:    ret
599entry:
600  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
601    <vscale x 2 x i64>* %0,
602    <vscale x 2 x i64> %1,
603    i32 %2)
604
605  ret <vscale x 2 x i64> %a
606}
607
608declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
609  <vscale x 2 x i64>,
610  <vscale x 2 x i64>*,
611  <vscale x 2 x i64>,
612  <vscale x 2 x i1>,
613  i32);
614
615define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
616; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
617; CHECK:       # %bb.0: # %entry
618; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
619; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
620; CHECK-NEXT:    ret
621entry:
622  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
623    <vscale x 2 x i64> %0,
624    <vscale x 2 x i64>* %1,
625    <vscale x 2 x i64> %2,
626    <vscale x 2 x i1> %3,
627    i32 %4)
628
629  ret <vscale x 2 x i64> %a
630}
631
632declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
633  <vscale x 4 x i64>*,
634  <vscale x 4 x i64>,
635  i32);
636
637define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
638; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64:
639; CHECK:       # %bb.0: # %entry
640; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
641; CHECK-NEXT:    vluxei64.v v8, (a0), v8
642; CHECK-NEXT:    ret
643entry:
644  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
645    <vscale x 4 x i64>* %0,
646    <vscale x 4 x i64> %1,
647    i32 %2)
648
649  ret <vscale x 4 x i64> %a
650}
651
652declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
653  <vscale x 4 x i64>,
654  <vscale x 4 x i64>*,
655  <vscale x 4 x i64>,
656  <vscale x 4 x i1>,
657  i32);
658
659define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
660; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
661; CHECK:       # %bb.0: # %entry
662; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
663; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
664; CHECK-NEXT:    ret
665entry:
666  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
667    <vscale x 4 x i64> %0,
668    <vscale x 4 x i64>* %1,
669    <vscale x 4 x i64> %2,
670    <vscale x 4 x i1> %3,
671    i32 %4)
672
673  ret <vscale x 4 x i64> %a
674}
675
676declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
677  <vscale x 8 x i64>*,
678  <vscale x 8 x i64>,
679  i32);
680
681define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
682; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64:
683; CHECK:       # %bb.0: # %entry
684; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
685; CHECK-NEXT:    vluxei64.v v8, (a0), v8
686; CHECK-NEXT:    ret
687entry:
688  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
689    <vscale x 8 x i64>* %0,
690    <vscale x 8 x i64> %1,
691    i32 %2)
692
693  ret <vscale x 8 x i64> %a
694}
695
696declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
697  <vscale x 8 x i64>,
698  <vscale x 8 x i64>*,
699  <vscale x 8 x i64>,
700  <vscale x 8 x i1>,
701  i32);
702
703define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
704; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
705; CHECK:       # %bb.0: # %entry
706; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
707; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
708; CHECK-NEXT:    ret
709entry:
710  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
711    <vscale x 8 x i64> %0,
712    <vscale x 8 x i64>* %1,
713    <vscale x 8 x i64> %2,
714    <vscale x 8 x i1> %3,
715    i32 %4)
716
717  ret <vscale x 8 x i64> %a
718}
719
720declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
721  <vscale x 1 x half>*,
722  <vscale x 1 x i64>,
723  i32);
724
725define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
726; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64:
727; CHECK:       # %bb.0: # %entry
728; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
729; CHECK-NEXT:    vluxei64.v v25, (a0), v8
730; CHECK-NEXT:    vmv1r.v v8, v25
731; CHECK-NEXT:    ret
732entry:
733  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
734    <vscale x 1 x half>* %0,
735    <vscale x 1 x i64> %1,
736    i32 %2)
737
738  ret <vscale x 1 x half> %a
739}
740
741declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
742  <vscale x 1 x half>,
743  <vscale x 1 x half>*,
744  <vscale x 1 x i64>,
745  <vscale x 1 x i1>,
746  i32);
747
748define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
749; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
750; CHECK:       # %bb.0: # %entry
751; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
752; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
753; CHECK-NEXT:    ret
754entry:
755  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
756    <vscale x 1 x half> %0,
757    <vscale x 1 x half>* %1,
758    <vscale x 1 x i64> %2,
759    <vscale x 1 x i1> %3,
760    i32 %4)
761
762  ret <vscale x 1 x half> %a
763}
764
765declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
766  <vscale x 2 x half>*,
767  <vscale x 2 x i64>,
768  i32);
769
770define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
771; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64:
772; CHECK:       # %bb.0: # %entry
773; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
774; CHECK-NEXT:    vluxei64.v v25, (a0), v8
775; CHECK-NEXT:    vmv1r.v v8, v25
776; CHECK-NEXT:    ret
777entry:
778  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
779    <vscale x 2 x half>* %0,
780    <vscale x 2 x i64> %1,
781    i32 %2)
782
783  ret <vscale x 2 x half> %a
784}
785
786declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
787  <vscale x 2 x half>,
788  <vscale x 2 x half>*,
789  <vscale x 2 x i64>,
790  <vscale x 2 x i1>,
791  i32);
792
793define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
794; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
795; CHECK:       # %bb.0: # %entry
796; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
797; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
798; CHECK-NEXT:    ret
799entry:
800  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
801    <vscale x 2 x half> %0,
802    <vscale x 2 x half>* %1,
803    <vscale x 2 x i64> %2,
804    <vscale x 2 x i1> %3,
805    i32 %4)
806
807  ret <vscale x 2 x half> %a
808}
809
810declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
811  <vscale x 4 x half>*,
812  <vscale x 4 x i64>,
813  i32);
814
815define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
816; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64:
817; CHECK:       # %bb.0: # %entry
818; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
819; CHECK-NEXT:    vluxei64.v v25, (a0), v8
820; CHECK-NEXT:    vmv1r.v v8, v25
821; CHECK-NEXT:    ret
822entry:
823  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
824    <vscale x 4 x half>* %0,
825    <vscale x 4 x i64> %1,
826    i32 %2)
827
828  ret <vscale x 4 x half> %a
829}
830
831declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
832  <vscale x 4 x half>,
833  <vscale x 4 x half>*,
834  <vscale x 4 x i64>,
835  <vscale x 4 x i1>,
836  i32);
837
838define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
839; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
840; CHECK:       # %bb.0: # %entry
841; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
842; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
843; CHECK-NEXT:    ret
844entry:
845  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
846    <vscale x 4 x half> %0,
847    <vscale x 4 x half>* %1,
848    <vscale x 4 x i64> %2,
849    <vscale x 4 x i1> %3,
850    i32 %4)
851
852  ret <vscale x 4 x half> %a
853}
854
855declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
856  <vscale x 8 x half>*,
857  <vscale x 8 x i64>,
858  i32);
859
860define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
861; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64:
862; CHECK:       # %bb.0: # %entry
863; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
864; CHECK-NEXT:    vluxei64.v v26, (a0), v8
865; CHECK-NEXT:    vmv2r.v v8, v26
866; CHECK-NEXT:    ret
867entry:
868  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
869    <vscale x 8 x half>* %0,
870    <vscale x 8 x i64> %1,
871    i32 %2)
872
873  ret <vscale x 8 x half> %a
874}
875
876declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
877  <vscale x 8 x half>,
878  <vscale x 8 x half>*,
879  <vscale x 8 x i64>,
880  <vscale x 8 x i1>,
881  i32);
882
883define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
884; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
885; CHECK:       # %bb.0: # %entry
886; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
887; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
888; CHECK-NEXT:    ret
889entry:
890  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
891    <vscale x 8 x half> %0,
892    <vscale x 8 x half>* %1,
893    <vscale x 8 x i64> %2,
894    <vscale x 8 x i1> %3,
895    i32 %4)
896
897  ret <vscale x 8 x half> %a
898}
899
900declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
901  <vscale x 1 x float>*,
902  <vscale x 1 x i64>,
903  i32);
904
905define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
906; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64:
907; CHECK:       # %bb.0: # %entry
908; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
909; CHECK-NEXT:    vluxei64.v v25, (a0), v8
910; CHECK-NEXT:    vmv1r.v v8, v25
911; CHECK-NEXT:    ret
912entry:
913  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
914    <vscale x 1 x float>* %0,
915    <vscale x 1 x i64> %1,
916    i32 %2)
917
918  ret <vscale x 1 x float> %a
919}
920
921declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
922  <vscale x 1 x float>,
923  <vscale x 1 x float>*,
924  <vscale x 1 x i64>,
925  <vscale x 1 x i1>,
926  i32);
927
928define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
929; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
930; CHECK:       # %bb.0: # %entry
931; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
932; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
933; CHECK-NEXT:    ret
934entry:
935  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
936    <vscale x 1 x float> %0,
937    <vscale x 1 x float>* %1,
938    <vscale x 1 x i64> %2,
939    <vscale x 1 x i1> %3,
940    i32 %4)
941
942  ret <vscale x 1 x float> %a
943}
944
945declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
946  <vscale x 2 x float>*,
947  <vscale x 2 x i64>,
948  i32);
949
950define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
951; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64:
952; CHECK:       # %bb.0: # %entry
953; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
954; CHECK-NEXT:    vluxei64.v v25, (a0), v8
955; CHECK-NEXT:    vmv1r.v v8, v25
956; CHECK-NEXT:    ret
957entry:
958  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
959    <vscale x 2 x float>* %0,
960    <vscale x 2 x i64> %1,
961    i32 %2)
962
963  ret <vscale x 2 x float> %a
964}
965
966declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
967  <vscale x 2 x float>,
968  <vscale x 2 x float>*,
969  <vscale x 2 x i64>,
970  <vscale x 2 x i1>,
971  i32);
972
973define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
974; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
975; CHECK:       # %bb.0: # %entry
976; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
977; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
978; CHECK-NEXT:    ret
979entry:
980  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
981    <vscale x 2 x float> %0,
982    <vscale x 2 x float>* %1,
983    <vscale x 2 x i64> %2,
984    <vscale x 2 x i1> %3,
985    i32 %4)
986
987  ret <vscale x 2 x float> %a
988}
989
990declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
991  <vscale x 4 x float>*,
992  <vscale x 4 x i64>,
993  i32);
994
995define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
996; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64:
997; CHECK:       # %bb.0: # %entry
998; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
999; CHECK-NEXT:    vluxei64.v v26, (a0), v8
1000; CHECK-NEXT:    vmv2r.v v8, v26
1001; CHECK-NEXT:    ret
1002entry:
1003  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
1004    <vscale x 4 x float>* %0,
1005    <vscale x 4 x i64> %1,
1006    i32 %2)
1007
1008  ret <vscale x 4 x float> %a
1009}
1010
1011declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
1012  <vscale x 4 x float>,
1013  <vscale x 4 x float>*,
1014  <vscale x 4 x i64>,
1015  <vscale x 4 x i1>,
1016  i32);
1017
1018define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
1019; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
1020; CHECK:       # %bb.0: # %entry
1021; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
1022; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
1023; CHECK-NEXT:    ret
1024entry:
1025  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
1026    <vscale x 4 x float> %0,
1027    <vscale x 4 x float>* %1,
1028    <vscale x 4 x i64> %2,
1029    <vscale x 4 x i1> %3,
1030    i32 %4)
1031
1032  ret <vscale x 4 x float> %a
1033}
1034
1035declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
1036  <vscale x 8 x float>*,
1037  <vscale x 8 x i64>,
1038  i32);
1039
1040define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
1041; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64:
1042; CHECK:       # %bb.0: # %entry
1043; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
1044; CHECK-NEXT:    vluxei64.v v28, (a0), v8
1045; CHECK-NEXT:    vmv4r.v v8, v28
1046; CHECK-NEXT:    ret
1047entry:
1048  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
1049    <vscale x 8 x float>* %0,
1050    <vscale x 8 x i64> %1,
1051    i32 %2)
1052
1053  ret <vscale x 8 x float> %a
1054}
1055
1056declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
1057  <vscale x 8 x float>,
1058  <vscale x 8 x float>*,
1059  <vscale x 8 x i64>,
1060  <vscale x 8 x i1>,
1061  i32);
1062
1063define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
1064; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
1065; CHECK:       # %bb.0: # %entry
1066; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
1067; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
1068; CHECK-NEXT:    ret
1069entry:
1070  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
1071    <vscale x 8 x float> %0,
1072    <vscale x 8 x float>* %1,
1073    <vscale x 8 x i64> %2,
1074    <vscale x 8 x i1> %3,
1075    i32 %4)
1076
1077  ret <vscale x 8 x float> %a
1078}
1079
1080declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
1081  <vscale x 1 x double>*,
1082  <vscale x 1 x i64>,
1083  i32);
1084
1085define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
1086; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64:
1087; CHECK:       # %bb.0: # %entry
1088; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1089; CHECK-NEXT:    vluxei64.v v8, (a0), v8
1090; CHECK-NEXT:    ret
1091entry:
1092  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
1093    <vscale x 1 x double>* %0,
1094    <vscale x 1 x i64> %1,
1095    i32 %2)
1096
1097  ret <vscale x 1 x double> %a
1098}
1099
1100declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
1101  <vscale x 1 x double>,
1102  <vscale x 1 x double>*,
1103  <vscale x 1 x i64>,
1104  <vscale x 1 x i1>,
1105  i32);
1106
1107define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
1108; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
1109; CHECK:       # %bb.0: # %entry
1110; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
1111; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
1112; CHECK-NEXT:    ret
1113entry:
1114  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
1115    <vscale x 1 x double> %0,
1116    <vscale x 1 x double>* %1,
1117    <vscale x 1 x i64> %2,
1118    <vscale x 1 x i1> %3,
1119    i32 %4)
1120
1121  ret <vscale x 1 x double> %a
1122}
1123
1124declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
1125  <vscale x 2 x double>*,
1126  <vscale x 2 x i64>,
1127  i32);
1128
1129define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
1130; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64:
1131; CHECK:       # %bb.0: # %entry
1132; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
1133; CHECK-NEXT:    vluxei64.v v8, (a0), v8
1134; CHECK-NEXT:    ret
1135entry:
1136  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
1137    <vscale x 2 x double>* %0,
1138    <vscale x 2 x i64> %1,
1139    i32 %2)
1140
1141  ret <vscale x 2 x double> %a
1142}
1143
1144declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
1145  <vscale x 2 x double>,
1146  <vscale x 2 x double>*,
1147  <vscale x 2 x i64>,
1148  <vscale x 2 x i1>,
1149  i32);
1150
1151define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
1152; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
1153; CHECK:       # %bb.0: # %entry
1154; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
1155; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
1156; CHECK-NEXT:    ret
1157entry:
1158  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
1159    <vscale x 2 x double> %0,
1160    <vscale x 2 x double>* %1,
1161    <vscale x 2 x i64> %2,
1162    <vscale x 2 x i1> %3,
1163    i32 %4)
1164
1165  ret <vscale x 2 x double> %a
1166}
1167
1168declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
1169  <vscale x 4 x double>*,
1170  <vscale x 4 x i64>,
1171  i32);
1172
1173define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
1174; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64:
1175; CHECK:       # %bb.0: # %entry
1176; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
1177; CHECK-NEXT:    vluxei64.v v8, (a0), v8
1178; CHECK-NEXT:    ret
1179entry:
1180  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
1181    <vscale x 4 x double>* %0,
1182    <vscale x 4 x i64> %1,
1183    i32 %2)
1184
1185  ret <vscale x 4 x double> %a
1186}
1187
1188declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
1189  <vscale x 4 x double>,
1190  <vscale x 4 x double>*,
1191  <vscale x 4 x i64>,
1192  <vscale x 4 x i1>,
1193  i32);
1194
1195define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
1196; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
1197; CHECK:       # %bb.0: # %entry
1198; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
1199; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
1200; CHECK-NEXT:    ret
1201entry:
1202  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
1203    <vscale x 4 x double> %0,
1204    <vscale x 4 x double>* %1,
1205    <vscale x 4 x i64> %2,
1206    <vscale x 4 x i1> %3,
1207    i32 %4)
1208
1209  ret <vscale x 4 x double> %a
1210}
1211
1212declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
1213  <vscale x 8 x double>*,
1214  <vscale x 8 x i64>,
1215  i32);
1216
1217define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
1218; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64:
1219; CHECK:       # %bb.0: # %entry
1220; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
1221; CHECK-NEXT:    vluxei64.v v8, (a0), v8
1222; CHECK-NEXT:    ret
1223entry:
1224  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
1225    <vscale x 8 x double>* %0,
1226    <vscale x 8 x i64> %1,
1227    i32 %2)
1228
1229  ret <vscale x 8 x double> %a
1230}
1231
1232declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
1233  <vscale x 8 x double>,
1234  <vscale x 8 x double>*,
1235  <vscale x 8 x i64>,
1236  <vscale x 8 x i1>,
1237  i32);
1238
1239define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
1240; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
1241; CHECK:       # %bb.0: # %entry
1242; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
1243; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
1244; CHECK-NEXT:    ret
1245entry:
1246  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
1247    <vscale x 8 x double> %0,
1248    <vscale x 8 x double>* %1,
1249    <vscale x 8 x i64> %2,
1250    <vscale x 8 x i1> %3,
1251    i32 %4)
1252
1253  ret <vscale x 8 x double> %a
1254}
1255
1256declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
1257  <vscale x 1 x i8>*,
1258  <vscale x 1 x i32>,
1259  i32);
1260
1261define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
1262; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32:
1263; CHECK:       # %bb.0: # %entry
1264; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
1265; CHECK-NEXT:    vluxei32.v v25, (a0), v8
1266; CHECK-NEXT:    vmv1r.v v8, v25
1267; CHECK-NEXT:    ret
1268entry:
1269  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
1270    <vscale x 1 x i8>* %0,
1271    <vscale x 1 x i32> %1,
1272    i32 %2)
1273
1274  ret <vscale x 1 x i8> %a
1275}
1276
1277declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
1278  <vscale x 1 x i8>,
1279  <vscale x 1 x i8>*,
1280  <vscale x 1 x i32>,
1281  <vscale x 1 x i1>,
1282  i32);
1283
1284define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
1285; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
1286; CHECK:       # %bb.0: # %entry
1287; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
1288; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
1289; CHECK-NEXT:    ret
1290entry:
1291  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
1292    <vscale x 1 x i8> %0,
1293    <vscale x 1 x i8>* %1,
1294    <vscale x 1 x i32> %2,
1295    <vscale x 1 x i1> %3,
1296    i32 %4)
1297
1298  ret <vscale x 1 x i8> %a
1299}
1300
1301declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
1302  <vscale x 2 x i8>*,
1303  <vscale x 2 x i32>,
1304  i32);
1305
1306define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
1307; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32:
1308; CHECK:       # %bb.0: # %entry
1309; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
1310; CHECK-NEXT:    vluxei32.v v25, (a0), v8
1311; CHECK-NEXT:    vmv1r.v v8, v25
1312; CHECK-NEXT:    ret
1313entry:
1314  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
1315    <vscale x 2 x i8>* %0,
1316    <vscale x 2 x i32> %1,
1317    i32 %2)
1318
1319  ret <vscale x 2 x i8> %a
1320}
1321
1322declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
1323  <vscale x 2 x i8>,
1324  <vscale x 2 x i8>*,
1325  <vscale x 2 x i32>,
1326  <vscale x 2 x i1>,
1327  i32);
1328
1329define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
1330; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
1331; CHECK:       # %bb.0: # %entry
1332; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
1333; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
1334; CHECK-NEXT:    ret
1335entry:
1336  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
1337    <vscale x 2 x i8> %0,
1338    <vscale x 2 x i8>* %1,
1339    <vscale x 2 x i32> %2,
1340    <vscale x 2 x i1> %3,
1341    i32 %4)
1342
1343  ret <vscale x 2 x i8> %a
1344}
1345
1346declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
1347  <vscale x 4 x i8>*,
1348  <vscale x 4 x i32>,
1349  i32);
1350
1351define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
1352; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32:
1353; CHECK:       # %bb.0: # %entry
1354; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
1355; CHECK-NEXT:    vluxei32.v v25, (a0), v8
1356; CHECK-NEXT:    vmv1r.v v8, v25
1357; CHECK-NEXT:    ret
1358entry:
1359  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
1360    <vscale x 4 x i8>* %0,
1361    <vscale x 4 x i32> %1,
1362    i32 %2)
1363
1364  ret <vscale x 4 x i8> %a
1365}
1366
1367declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
1368  <vscale x 4 x i8>,
1369  <vscale x 4 x i8>*,
1370  <vscale x 4 x i32>,
1371  <vscale x 4 x i1>,
1372  i32);
1373
1374define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
1375; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
1376; CHECK:       # %bb.0: # %entry
1377; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
1378; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
1379; CHECK-NEXT:    ret
1380entry:
1381  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
1382    <vscale x 4 x i8> %0,
1383    <vscale x 4 x i8>* %1,
1384    <vscale x 4 x i32> %2,
1385    <vscale x 4 x i1> %3,
1386    i32 %4)
1387
1388  ret <vscale x 4 x i8> %a
1389}
1390
1391declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
1392  <vscale x 8 x i8>*,
1393  <vscale x 8 x i32>,
1394  i32);
1395
1396define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
1397; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32:
1398; CHECK:       # %bb.0: # %entry
1399; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
1400; CHECK-NEXT:    vluxei32.v v25, (a0), v8
1401; CHECK-NEXT:    vmv1r.v v8, v25
1402; CHECK-NEXT:    ret
1403entry:
1404  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
1405    <vscale x 8 x i8>* %0,
1406    <vscale x 8 x i32> %1,
1407    i32 %2)
1408
1409  ret <vscale x 8 x i8> %a
1410}
1411
1412declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
1413  <vscale x 8 x i8>,
1414  <vscale x 8 x i8>*,
1415  <vscale x 8 x i32>,
1416  <vscale x 8 x i1>,
1417  i32);
1418
1419define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
1420; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
1421; CHECK:       # %bb.0: # %entry
1422; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
1423; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
1424; CHECK-NEXT:    ret
1425entry:
1426  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
1427    <vscale x 8 x i8> %0,
1428    <vscale x 8 x i8>* %1,
1429    <vscale x 8 x i32> %2,
1430    <vscale x 8 x i1> %3,
1431    i32 %4)
1432
1433  ret <vscale x 8 x i8> %a
1434}
1435
1436declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
1437  <vscale x 16 x i8>*,
1438  <vscale x 16 x i32>,
1439  i32);
1440
1441define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
1442; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32:
1443; CHECK:       # %bb.0: # %entry
1444; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
1445; CHECK-NEXT:    vluxei32.v v26, (a0), v8
1446; CHECK-NEXT:    vmv2r.v v8, v26
1447; CHECK-NEXT:    ret
1448entry:
1449  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
1450    <vscale x 16 x i8>* %0,
1451    <vscale x 16 x i32> %1,
1452    i32 %2)
1453
1454  ret <vscale x 16 x i8> %a
1455}
1456
1457declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
1458  <vscale x 16 x i8>,
1459  <vscale x 16 x i8>*,
1460  <vscale x 16 x i32>,
1461  <vscale x 16 x i1>,
1462  i32);
1463
1464define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
1465; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
1466; CHECK:       # %bb.0: # %entry
1467; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
1468; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
1469; CHECK-NEXT:    ret
1470entry:
1471  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
1472    <vscale x 16 x i8> %0,
1473    <vscale x 16 x i8>* %1,
1474    <vscale x 16 x i32> %2,
1475    <vscale x 16 x i1> %3,
1476    i32 %4)
1477
1478  ret <vscale x 16 x i8> %a
1479}
1480
1481declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
1482  <vscale x 1 x i16>*,
1483  <vscale x 1 x i32>,
1484  i32);
1485
1486define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
1487; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32:
1488; CHECK:       # %bb.0: # %entry
1489; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
1490; CHECK-NEXT:    vluxei32.v v25, (a0), v8
1491; CHECK-NEXT:    vmv1r.v v8, v25
1492; CHECK-NEXT:    ret
1493entry:
1494  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
1495    <vscale x 1 x i16>* %0,
1496    <vscale x 1 x i32> %1,
1497    i32 %2)
1498
1499  ret <vscale x 1 x i16> %a
1500}
1501
1502declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
1503  <vscale x 1 x i16>,
1504  <vscale x 1 x i16>*,
1505  <vscale x 1 x i32>,
1506  <vscale x 1 x i1>,
1507  i32);
1508
1509define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
1510; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
1511; CHECK:       # %bb.0: # %entry
1512; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
1513; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
1514; CHECK-NEXT:    ret
1515entry:
1516  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
1517    <vscale x 1 x i16> %0,
1518    <vscale x 1 x i16>* %1,
1519    <vscale x 1 x i32> %2,
1520    <vscale x 1 x i1> %3,
1521    i32 %4)
1522
1523  ret <vscale x 1 x i16> %a
1524}
1525
1526declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
1527  <vscale x 2 x i16>*,
1528  <vscale x 2 x i32>,
1529  i32);
1530
1531define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
1532; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32:
1533; CHECK:       # %bb.0: # %entry
1534; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
1535; CHECK-NEXT:    vluxei32.v v25, (a0), v8
1536; CHECK-NEXT:    vmv1r.v v8, v25
1537; CHECK-NEXT:    ret
1538entry:
1539  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
1540    <vscale x 2 x i16>* %0,
1541    <vscale x 2 x i32> %1,
1542    i32 %2)
1543
1544  ret <vscale x 2 x i16> %a
1545}
1546
1547declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
1548  <vscale x 2 x i16>,
1549  <vscale x 2 x i16>*,
1550  <vscale x 2 x i32>,
1551  <vscale x 2 x i1>,
1552  i32);
1553
1554define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
1555; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
1556; CHECK:       # %bb.0: # %entry
1557; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
1558; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
1559; CHECK-NEXT:    ret
1560entry:
1561  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
1562    <vscale x 2 x i16> %0,
1563    <vscale x 2 x i16>* %1,
1564    <vscale x 2 x i32> %2,
1565    <vscale x 2 x i1> %3,
1566    i32 %4)
1567
1568  ret <vscale x 2 x i16> %a
1569}
1570
1571declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
1572  <vscale x 4 x i16>*,
1573  <vscale x 4 x i32>,
1574  i32);
1575
1576define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
1577; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32:
1578; CHECK:       # %bb.0: # %entry
1579; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
1580; CHECK-NEXT:    vluxei32.v v25, (a0), v8
1581; CHECK-NEXT:    vmv1r.v v8, v25
1582; CHECK-NEXT:    ret
1583entry:
1584  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
1585    <vscale x 4 x i16>* %0,
1586    <vscale x 4 x i32> %1,
1587    i32 %2)
1588
1589  ret <vscale x 4 x i16> %a
1590}
1591
1592declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
1593  <vscale x 4 x i16>,
1594  <vscale x 4 x i16>*,
1595  <vscale x 4 x i32>,
1596  <vscale x 4 x i1>,
1597  i32);
1598
1599define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
1600; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
1601; CHECK:       # %bb.0: # %entry
1602; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
1603; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
1604; CHECK-NEXT:    ret
1605entry:
1606  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
1607    <vscale x 4 x i16> %0,
1608    <vscale x 4 x i16>* %1,
1609    <vscale x 4 x i32> %2,
1610    <vscale x 4 x i1> %3,
1611    i32 %4)
1612
1613  ret <vscale x 4 x i16> %a
1614}
1615
1616declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
1617  <vscale x 8 x i16>*,
1618  <vscale x 8 x i32>,
1619  i32);
1620
1621define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
1622; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32:
1623; CHECK:       # %bb.0: # %entry
1624; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
1625; CHECK-NEXT:    vluxei32.v v26, (a0), v8
1626; CHECK-NEXT:    vmv2r.v v8, v26
1627; CHECK-NEXT:    ret
1628entry:
1629  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
1630    <vscale x 8 x i16>* %0,
1631    <vscale x 8 x i32> %1,
1632    i32 %2)
1633
1634  ret <vscale x 8 x i16> %a
1635}
1636
1637declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
1638  <vscale x 8 x i16>,
1639  <vscale x 8 x i16>*,
1640  <vscale x 8 x i32>,
1641  <vscale x 8 x i1>,
1642  i32);
1643
1644define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
1645; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
1646; CHECK:       # %bb.0: # %entry
1647; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
1648; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
1649; CHECK-NEXT:    ret
1650entry:
1651  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
1652    <vscale x 8 x i16> %0,
1653    <vscale x 8 x i16>* %1,
1654    <vscale x 8 x i32> %2,
1655    <vscale x 8 x i1> %3,
1656    i32 %4)
1657
1658  ret <vscale x 8 x i16> %a
1659}
1660
1661declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
1662  <vscale x 16 x i16>*,
1663  <vscale x 16 x i32>,
1664  i32);
1665
1666define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
1667; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32:
1668; CHECK:       # %bb.0: # %entry
1669; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
1670; CHECK-NEXT:    vluxei32.v v28, (a0), v8
1671; CHECK-NEXT:    vmv4r.v v8, v28
1672; CHECK-NEXT:    ret
1673entry:
1674  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
1675    <vscale x 16 x i16>* %0,
1676    <vscale x 16 x i32> %1,
1677    i32 %2)
1678
1679  ret <vscale x 16 x i16> %a
1680}
1681
1682declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
1683  <vscale x 16 x i16>,
1684  <vscale x 16 x i16>*,
1685  <vscale x 16 x i32>,
1686  <vscale x 16 x i1>,
1687  i32);
1688
1689define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
1690; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
1691; CHECK:       # %bb.0: # %entry
1692; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
1693; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
1694; CHECK-NEXT:    ret
1695entry:
1696  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
1697    <vscale x 16 x i16> %0,
1698    <vscale x 16 x i16>* %1,
1699    <vscale x 16 x i32> %2,
1700    <vscale x 16 x i1> %3,
1701    i32 %4)
1702
1703  ret <vscale x 16 x i16> %a
1704}
1705
1706declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
1707  <vscale x 1 x i32>*,
1708  <vscale x 1 x i32>,
1709  i32);
1710
1711define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
1712; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32:
1713; CHECK:       # %bb.0: # %entry
1714; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
1715; CHECK-NEXT:    vluxei32.v v8, (a0), v8
1716; CHECK-NEXT:    ret
1717entry:
1718  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
1719    <vscale x 1 x i32>* %0,
1720    <vscale x 1 x i32> %1,
1721    i32 %2)
1722
1723  ret <vscale x 1 x i32> %a
1724}
1725
1726declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
1727  <vscale x 1 x i32>,
1728  <vscale x 1 x i32>*,
1729  <vscale x 1 x i32>,
1730  <vscale x 1 x i1>,
1731  i32);
1732
1733define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
1734; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
1735; CHECK:       # %bb.0: # %entry
1736; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
1737; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
1738; CHECK-NEXT:    ret
1739entry:
1740  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
1741    <vscale x 1 x i32> %0,
1742    <vscale x 1 x i32>* %1,
1743    <vscale x 1 x i32> %2,
1744    <vscale x 1 x i1> %3,
1745    i32 %4)
1746
1747  ret <vscale x 1 x i32> %a
1748}
1749
1750declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
1751  <vscale x 2 x i32>*,
1752  <vscale x 2 x i32>,
1753  i32);
1754
1755define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
1756; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32:
1757; CHECK:       # %bb.0: # %entry
1758; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
1759; CHECK-NEXT:    vluxei32.v v8, (a0), v8
1760; CHECK-NEXT:    ret
1761entry:
1762  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
1763    <vscale x 2 x i32>* %0,
1764    <vscale x 2 x i32> %1,
1765    i32 %2)
1766
1767  ret <vscale x 2 x i32> %a
1768}
1769
1770declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
1771  <vscale x 2 x i32>,
1772  <vscale x 2 x i32>*,
1773  <vscale x 2 x i32>,
1774  <vscale x 2 x i1>,
1775  i32);
1776
1777define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
1778; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
1779; CHECK:       # %bb.0: # %entry
1780; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
1781; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
1782; CHECK-NEXT:    ret
1783entry:
1784  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
1785    <vscale x 2 x i32> %0,
1786    <vscale x 2 x i32>* %1,
1787    <vscale x 2 x i32> %2,
1788    <vscale x 2 x i1> %3,
1789    i32 %4)
1790
1791  ret <vscale x 2 x i32> %a
1792}
1793
1794declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
1795  <vscale x 4 x i32>*,
1796  <vscale x 4 x i32>,
1797  i32);
1798
1799define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
1800; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32:
1801; CHECK:       # %bb.0: # %entry
1802; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
1803; CHECK-NEXT:    vluxei32.v v8, (a0), v8
1804; CHECK-NEXT:    ret
1805entry:
1806  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
1807    <vscale x 4 x i32>* %0,
1808    <vscale x 4 x i32> %1,
1809    i32 %2)
1810
1811  ret <vscale x 4 x i32> %a
1812}
1813
1814declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
1815  <vscale x 4 x i32>,
1816  <vscale x 4 x i32>*,
1817  <vscale x 4 x i32>,
1818  <vscale x 4 x i1>,
1819  i32);
1820
1821define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
1822; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
1823; CHECK:       # %bb.0: # %entry
1824; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
1825; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
1826; CHECK-NEXT:    ret
1827entry:
1828  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
1829    <vscale x 4 x i32> %0,
1830    <vscale x 4 x i32>* %1,
1831    <vscale x 4 x i32> %2,
1832    <vscale x 4 x i1> %3,
1833    i32 %4)
1834
1835  ret <vscale x 4 x i32> %a
1836}
1837
1838declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
1839  <vscale x 8 x i32>*,
1840  <vscale x 8 x i32>,
1841  i32);
1842
1843define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
1844; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32:
1845; CHECK:       # %bb.0: # %entry
1846; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
1847; CHECK-NEXT:    vluxei32.v v8, (a0), v8
1848; CHECK-NEXT:    ret
1849entry:
1850  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
1851    <vscale x 8 x i32>* %0,
1852    <vscale x 8 x i32> %1,
1853    i32 %2)
1854
1855  ret <vscale x 8 x i32> %a
1856}
1857
1858declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
1859  <vscale x 8 x i32>,
1860  <vscale x 8 x i32>*,
1861  <vscale x 8 x i32>,
1862  <vscale x 8 x i1>,
1863  i32);
1864
1865define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
1866; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
1867; CHECK:       # %bb.0: # %entry
1868; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
1869; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
1870; CHECK-NEXT:    ret
1871entry:
1872  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
1873    <vscale x 8 x i32> %0,
1874    <vscale x 8 x i32>* %1,
1875    <vscale x 8 x i32> %2,
1876    <vscale x 8 x i1> %3,
1877    i32 %4)
1878
1879  ret <vscale x 8 x i32> %a
1880}
1881
1882declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
1883  <vscale x 16 x i32>*,
1884  <vscale x 16 x i32>,
1885  i32);
1886
1887define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
1888; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32:
1889; CHECK:       # %bb.0: # %entry
1890; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
1891; CHECK-NEXT:    vluxei32.v v8, (a0), v8
1892; CHECK-NEXT:    ret
1893entry:
1894  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
1895    <vscale x 16 x i32>* %0,
1896    <vscale x 16 x i32> %1,
1897    i32 %2)
1898
1899  ret <vscale x 16 x i32> %a
1900}
1901
1902declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
1903  <vscale x 16 x i32>,
1904  <vscale x 16 x i32>*,
1905  <vscale x 16 x i32>,
1906  <vscale x 16 x i1>,
1907  i32);
1908
1909define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
1910; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
1911; CHECK:       # %bb.0: # %entry
1912; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
1913; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
1914; CHECK-NEXT:    ret
1915entry:
1916  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
1917    <vscale x 16 x i32> %0,
1918    <vscale x 16 x i32>* %1,
1919    <vscale x 16 x i32> %2,
1920    <vscale x 16 x i1> %3,
1921    i32 %4)
1922
1923  ret <vscale x 16 x i32> %a
1924}
1925
1926declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
1927  <vscale x 1 x i64>*,
1928  <vscale x 1 x i32>,
1929  i32);
1930
1931define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
1932; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32:
1933; CHECK:       # %bb.0: # %entry
1934; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1935; CHECK-NEXT:    vluxei32.v v25, (a0), v8
1936; CHECK-NEXT:    vmv1r.v v8, v25
1937; CHECK-NEXT:    ret
1938entry:
1939  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
1940    <vscale x 1 x i64>* %0,
1941    <vscale x 1 x i32> %1,
1942    i32 %2)
1943
1944  ret <vscale x 1 x i64> %a
1945}
1946
1947declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
1948  <vscale x 1 x i64>,
1949  <vscale x 1 x i64>*,
1950  <vscale x 1 x i32>,
1951  <vscale x 1 x i1>,
1952  i32);
1953
1954define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
1955; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
1956; CHECK:       # %bb.0: # %entry
1957; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
1958; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
1959; CHECK-NEXT:    ret
1960entry:
1961  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
1962    <vscale x 1 x i64> %0,
1963    <vscale x 1 x i64>* %1,
1964    <vscale x 1 x i32> %2,
1965    <vscale x 1 x i1> %3,
1966    i32 %4)
1967
1968  ret <vscale x 1 x i64> %a
1969}
1970
1971declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
1972  <vscale x 2 x i64>*,
1973  <vscale x 2 x i32>,
1974  i32);
1975
1976define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
1977; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32:
1978; CHECK:       # %bb.0: # %entry
1979; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
1980; CHECK-NEXT:    vluxei32.v v26, (a0), v8
1981; CHECK-NEXT:    vmv2r.v v8, v26
1982; CHECK-NEXT:    ret
1983entry:
1984  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
1985    <vscale x 2 x i64>* %0,
1986    <vscale x 2 x i32> %1,
1987    i32 %2)
1988
1989  ret <vscale x 2 x i64> %a
1990}
1991
1992declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
1993  <vscale x 2 x i64>,
1994  <vscale x 2 x i64>*,
1995  <vscale x 2 x i32>,
1996  <vscale x 2 x i1>,
1997  i32);
1998
1999define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
2000; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
2001; CHECK:       # %bb.0: # %entry
2002; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
2003; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
2004; CHECK-NEXT:    ret
2005entry:
2006  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
2007    <vscale x 2 x i64> %0,
2008    <vscale x 2 x i64>* %1,
2009    <vscale x 2 x i32> %2,
2010    <vscale x 2 x i1> %3,
2011    i32 %4)
2012
2013  ret <vscale x 2 x i64> %a
2014}
2015
2016declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
2017  <vscale x 4 x i64>*,
2018  <vscale x 4 x i32>,
2019  i32);
2020
2021define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
2022; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32:
2023; CHECK:       # %bb.0: # %entry
2024; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
2025; CHECK-NEXT:    vluxei32.v v28, (a0), v8
2026; CHECK-NEXT:    vmv4r.v v8, v28
2027; CHECK-NEXT:    ret
2028entry:
2029  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
2030    <vscale x 4 x i64>* %0,
2031    <vscale x 4 x i32> %1,
2032    i32 %2)
2033
2034  ret <vscale x 4 x i64> %a
2035}
2036
2037declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
2038  <vscale x 4 x i64>,
2039  <vscale x 4 x i64>*,
2040  <vscale x 4 x i32>,
2041  <vscale x 4 x i1>,
2042  i32);
2043
2044define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
2045; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
2046; CHECK:       # %bb.0: # %entry
2047; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
2048; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
2049; CHECK-NEXT:    ret
2050entry:
2051  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
2052    <vscale x 4 x i64> %0,
2053    <vscale x 4 x i64>* %1,
2054    <vscale x 4 x i32> %2,
2055    <vscale x 4 x i1> %3,
2056    i32 %4)
2057
2058  ret <vscale x 4 x i64> %a
2059}
2060
2061declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
2062  <vscale x 8 x i64>*,
2063  <vscale x 8 x i32>,
2064  i32);
2065
2066define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
2067; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32:
2068; CHECK:       # %bb.0: # %entry
2069; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
2070; CHECK-NEXT:    vluxei32.v v16, (a0), v8
2071; CHECK-NEXT:    vmv8r.v v8, v16
2072; CHECK-NEXT:    ret
2073entry:
2074  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
2075    <vscale x 8 x i64>* %0,
2076    <vscale x 8 x i32> %1,
2077    i32 %2)
2078
2079  ret <vscale x 8 x i64> %a
2080}
2081
2082declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
2083  <vscale x 8 x i64>,
2084  <vscale x 8 x i64>*,
2085  <vscale x 8 x i32>,
2086  <vscale x 8 x i1>,
2087  i32);
2088
2089define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
2090; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
2091; CHECK:       # %bb.0: # %entry
2092; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
2093; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
2094; CHECK-NEXT:    ret
2095entry:
2096  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
2097    <vscale x 8 x i64> %0,
2098    <vscale x 8 x i64>* %1,
2099    <vscale x 8 x i32> %2,
2100    <vscale x 8 x i1> %3,
2101    i32 %4)
2102
2103  ret <vscale x 8 x i64> %a
2104}
2105
2106declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
2107  <vscale x 1 x half>*,
2108  <vscale x 1 x i32>,
2109  i32);
2110
2111define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
2112; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32:
2113; CHECK:       # %bb.0: # %entry
2114; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
2115; CHECK-NEXT:    vluxei32.v v25, (a0), v8
2116; CHECK-NEXT:    vmv1r.v v8, v25
2117; CHECK-NEXT:    ret
2118entry:
2119  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
2120    <vscale x 1 x half>* %0,
2121    <vscale x 1 x i32> %1,
2122    i32 %2)
2123
2124  ret <vscale x 1 x half> %a
2125}
2126
2127declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
2128  <vscale x 1 x half>,
2129  <vscale x 1 x half>*,
2130  <vscale x 1 x i32>,
2131  <vscale x 1 x i1>,
2132  i32);
2133
2134define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
2135; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
2136; CHECK:       # %bb.0: # %entry
2137; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
2138; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
2139; CHECK-NEXT:    ret
2140entry:
2141  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
2142    <vscale x 1 x half> %0,
2143    <vscale x 1 x half>* %1,
2144    <vscale x 1 x i32> %2,
2145    <vscale x 1 x i1> %3,
2146    i32 %4)
2147
2148  ret <vscale x 1 x half> %a
2149}
2150
2151declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
2152  <vscale x 2 x half>*,
2153  <vscale x 2 x i32>,
2154  i32);
2155
2156define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
2157; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32:
2158; CHECK:       # %bb.0: # %entry
2159; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
2160; CHECK-NEXT:    vluxei32.v v25, (a0), v8
2161; CHECK-NEXT:    vmv1r.v v8, v25
2162; CHECK-NEXT:    ret
2163entry:
2164  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
2165    <vscale x 2 x half>* %0,
2166    <vscale x 2 x i32> %1,
2167    i32 %2)
2168
2169  ret <vscale x 2 x half> %a
2170}
2171
2172declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
2173  <vscale x 2 x half>,
2174  <vscale x 2 x half>*,
2175  <vscale x 2 x i32>,
2176  <vscale x 2 x i1>,
2177  i32);
2178
2179define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
2180; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
2181; CHECK:       # %bb.0: # %entry
2182; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
2183; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
2184; CHECK-NEXT:    ret
2185entry:
2186  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
2187    <vscale x 2 x half> %0,
2188    <vscale x 2 x half>* %1,
2189    <vscale x 2 x i32> %2,
2190    <vscale x 2 x i1> %3,
2191    i32 %4)
2192
2193  ret <vscale x 2 x half> %a
2194}
2195
2196declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
2197  <vscale x 4 x half>*,
2198  <vscale x 4 x i32>,
2199  i32);
2200
2201define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
2202; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32:
2203; CHECK:       # %bb.0: # %entry
2204; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
2205; CHECK-NEXT:    vluxei32.v v25, (a0), v8
2206; CHECK-NEXT:    vmv1r.v v8, v25
2207; CHECK-NEXT:    ret
2208entry:
2209  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
2210    <vscale x 4 x half>* %0,
2211    <vscale x 4 x i32> %1,
2212    i32 %2)
2213
2214  ret <vscale x 4 x half> %a
2215}
2216
2217declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
2218  <vscale x 4 x half>,
2219  <vscale x 4 x half>*,
2220  <vscale x 4 x i32>,
2221  <vscale x 4 x i1>,
2222  i32);
2223
2224define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
2225; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
2226; CHECK:       # %bb.0: # %entry
2227; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
2228; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
2229; CHECK-NEXT:    ret
2230entry:
2231  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
2232    <vscale x 4 x half> %0,
2233    <vscale x 4 x half>* %1,
2234    <vscale x 4 x i32> %2,
2235    <vscale x 4 x i1> %3,
2236    i32 %4)
2237
2238  ret <vscale x 4 x half> %a
2239}
2240
2241declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
2242  <vscale x 8 x half>*,
2243  <vscale x 8 x i32>,
2244  i32);
2245
2246define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
2247; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32:
2248; CHECK:       # %bb.0: # %entry
2249; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
2250; CHECK-NEXT:    vluxei32.v v26, (a0), v8
2251; CHECK-NEXT:    vmv2r.v v8, v26
2252; CHECK-NEXT:    ret
2253entry:
2254  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
2255    <vscale x 8 x half>* %0,
2256    <vscale x 8 x i32> %1,
2257    i32 %2)
2258
2259  ret <vscale x 8 x half> %a
2260}
2261
2262declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
2263  <vscale x 8 x half>,
2264  <vscale x 8 x half>*,
2265  <vscale x 8 x i32>,
2266  <vscale x 8 x i1>,
2267  i32);
2268
2269define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
2270; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
2271; CHECK:       # %bb.0: # %entry
2272; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
2273; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
2274; CHECK-NEXT:    ret
2275entry:
2276  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
2277    <vscale x 8 x half> %0,
2278    <vscale x 8 x half>* %1,
2279    <vscale x 8 x i32> %2,
2280    <vscale x 8 x i1> %3,
2281    i32 %4)
2282
2283  ret <vscale x 8 x half> %a
2284}
2285
2286declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
2287  <vscale x 16 x half>*,
2288  <vscale x 16 x i32>,
2289  i32);
2290
2291define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
2292; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32:
2293; CHECK:       # %bb.0: # %entry
2294; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
2295; CHECK-NEXT:    vluxei32.v v28, (a0), v8
2296; CHECK-NEXT:    vmv4r.v v8, v28
2297; CHECK-NEXT:    ret
2298entry:
2299  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
2300    <vscale x 16 x half>* %0,
2301    <vscale x 16 x i32> %1,
2302    i32 %2)
2303
2304  ret <vscale x 16 x half> %a
2305}
2306
2307declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
2308  <vscale x 16 x half>,
2309  <vscale x 16 x half>*,
2310  <vscale x 16 x i32>,
2311  <vscale x 16 x i1>,
2312  i32);
2313
2314define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
2315; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
2316; CHECK:       # %bb.0: # %entry
2317; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
2318; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
2319; CHECK-NEXT:    ret
2320entry:
2321  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
2322    <vscale x 16 x half> %0,
2323    <vscale x 16 x half>* %1,
2324    <vscale x 16 x i32> %2,
2325    <vscale x 16 x i1> %3,
2326    i32 %4)
2327
2328  ret <vscale x 16 x half> %a
2329}
2330
2331declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
2332  <vscale x 1 x float>*,
2333  <vscale x 1 x i32>,
2334  i32);
2335
2336define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
2337; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32:
2338; CHECK:       # %bb.0: # %entry
2339; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
2340; CHECK-NEXT:    vluxei32.v v8, (a0), v8
2341; CHECK-NEXT:    ret
2342entry:
2343  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
2344    <vscale x 1 x float>* %0,
2345    <vscale x 1 x i32> %1,
2346    i32 %2)
2347
2348  ret <vscale x 1 x float> %a
2349}
2350
2351declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
2352  <vscale x 1 x float>,
2353  <vscale x 1 x float>*,
2354  <vscale x 1 x i32>,
2355  <vscale x 1 x i1>,
2356  i32);
2357
2358define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
2359; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
2360; CHECK:       # %bb.0: # %entry
2361; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
2362; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
2363; CHECK-NEXT:    ret
2364entry:
2365  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
2366    <vscale x 1 x float> %0,
2367    <vscale x 1 x float>* %1,
2368    <vscale x 1 x i32> %2,
2369    <vscale x 1 x i1> %3,
2370    i32 %4)
2371
2372  ret <vscale x 1 x float> %a
2373}
2374
2375declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
2376  <vscale x 2 x float>*,
2377  <vscale x 2 x i32>,
2378  i32);
2379
2380define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
2381; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32:
2382; CHECK:       # %bb.0: # %entry
2383; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
2384; CHECK-NEXT:    vluxei32.v v8, (a0), v8
2385; CHECK-NEXT:    ret
2386entry:
2387  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
2388    <vscale x 2 x float>* %0,
2389    <vscale x 2 x i32> %1,
2390    i32 %2)
2391
2392  ret <vscale x 2 x float> %a
2393}
2394
2395declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
2396  <vscale x 2 x float>,
2397  <vscale x 2 x float>*,
2398  <vscale x 2 x i32>,
2399  <vscale x 2 x i1>,
2400  i32);
2401
2402define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
2403; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
2404; CHECK:       # %bb.0: # %entry
2405; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
2406; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
2407; CHECK-NEXT:    ret
2408entry:
2409  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
2410    <vscale x 2 x float> %0,
2411    <vscale x 2 x float>* %1,
2412    <vscale x 2 x i32> %2,
2413    <vscale x 2 x i1> %3,
2414    i32 %4)
2415
2416  ret <vscale x 2 x float> %a
2417}
2418
2419declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
2420  <vscale x 4 x float>*,
2421  <vscale x 4 x i32>,
2422  i32);
2423
2424define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
2425; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32:
2426; CHECK:       # %bb.0: # %entry
2427; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
2428; CHECK-NEXT:    vluxei32.v v8, (a0), v8
2429; CHECK-NEXT:    ret
2430entry:
2431  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
2432    <vscale x 4 x float>* %0,
2433    <vscale x 4 x i32> %1,
2434    i32 %2)
2435
2436  ret <vscale x 4 x float> %a
2437}
2438
2439declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
2440  <vscale x 4 x float>,
2441  <vscale x 4 x float>*,
2442  <vscale x 4 x i32>,
2443  <vscale x 4 x i1>,
2444  i32);
2445
2446define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
2447; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
2448; CHECK:       # %bb.0: # %entry
2449; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
2450; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
2451; CHECK-NEXT:    ret
2452entry:
2453  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
2454    <vscale x 4 x float> %0,
2455    <vscale x 4 x float>* %1,
2456    <vscale x 4 x i32> %2,
2457    <vscale x 4 x i1> %3,
2458    i32 %4)
2459
2460  ret <vscale x 4 x float> %a
2461}
2462
2463declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
2464  <vscale x 8 x float>*,
2465  <vscale x 8 x i32>,
2466  i32);
2467
2468define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
2469; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32:
2470; CHECK:       # %bb.0: # %entry
2471; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
2472; CHECK-NEXT:    vluxei32.v v8, (a0), v8
2473; CHECK-NEXT:    ret
2474entry:
2475  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
2476    <vscale x 8 x float>* %0,
2477    <vscale x 8 x i32> %1,
2478    i32 %2)
2479
2480  ret <vscale x 8 x float> %a
2481}
2482
2483declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
2484  <vscale x 8 x float>,
2485  <vscale x 8 x float>*,
2486  <vscale x 8 x i32>,
2487  <vscale x 8 x i1>,
2488  i32);
2489
2490define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
2491; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
2492; CHECK:       # %bb.0: # %entry
2493; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
2494; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
2495; CHECK-NEXT:    ret
2496entry:
2497  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
2498    <vscale x 8 x float> %0,
2499    <vscale x 8 x float>* %1,
2500    <vscale x 8 x i32> %2,
2501    <vscale x 8 x i1> %3,
2502    i32 %4)
2503
2504  ret <vscale x 8 x float> %a
2505}
2506
2507declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
2508  <vscale x 16 x float>*,
2509  <vscale x 16 x i32>,
2510  i32);
2511
2512define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
2513; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32:
2514; CHECK:       # %bb.0: # %entry
2515; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
2516; CHECK-NEXT:    vluxei32.v v8, (a0), v8
2517; CHECK-NEXT:    ret
2518entry:
2519  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
2520    <vscale x 16 x float>* %0,
2521    <vscale x 16 x i32> %1,
2522    i32 %2)
2523
2524  ret <vscale x 16 x float> %a
2525}
2526
2527declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
2528  <vscale x 16 x float>,
2529  <vscale x 16 x float>*,
2530  <vscale x 16 x i32>,
2531  <vscale x 16 x i1>,
2532  i32);
2533
2534define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
2535; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
2536; CHECK:       # %bb.0: # %entry
2537; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
2538; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
2539; CHECK-NEXT:    ret
2540entry:
2541  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
2542    <vscale x 16 x float> %0,
2543    <vscale x 16 x float>* %1,
2544    <vscale x 16 x i32> %2,
2545    <vscale x 16 x i1> %3,
2546    i32 %4)
2547
2548  ret <vscale x 16 x float> %a
2549}
2550
2551declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
2552  <vscale x 1 x double>*,
2553  <vscale x 1 x i32>,
2554  i32);
2555
2556define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
2557; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32:
2558; CHECK:       # %bb.0: # %entry
2559; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
2560; CHECK-NEXT:    vluxei32.v v25, (a0), v8
2561; CHECK-NEXT:    vmv1r.v v8, v25
2562; CHECK-NEXT:    ret
2563entry:
2564  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
2565    <vscale x 1 x double>* %0,
2566    <vscale x 1 x i32> %1,
2567    i32 %2)
2568
2569  ret <vscale x 1 x double> %a
2570}
2571
2572declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
2573  <vscale x 1 x double>,
2574  <vscale x 1 x double>*,
2575  <vscale x 1 x i32>,
2576  <vscale x 1 x i1>,
2577  i32);
2578
2579define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
2580; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
2581; CHECK:       # %bb.0: # %entry
2582; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
2583; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
2584; CHECK-NEXT:    ret
2585entry:
2586  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
2587    <vscale x 1 x double> %0,
2588    <vscale x 1 x double>* %1,
2589    <vscale x 1 x i32> %2,
2590    <vscale x 1 x i1> %3,
2591    i32 %4)
2592
2593  ret <vscale x 1 x double> %a
2594}
2595
2596declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
2597  <vscale x 2 x double>*,
2598  <vscale x 2 x i32>,
2599  i32);
2600
2601define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
2602; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32:
2603; CHECK:       # %bb.0: # %entry
2604; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
2605; CHECK-NEXT:    vluxei32.v v26, (a0), v8
2606; CHECK-NEXT:    vmv2r.v v8, v26
2607; CHECK-NEXT:    ret
2608entry:
2609  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
2610    <vscale x 2 x double>* %0,
2611    <vscale x 2 x i32> %1,
2612    i32 %2)
2613
2614  ret <vscale x 2 x double> %a
2615}
2616
2617declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
2618  <vscale x 2 x double>,
2619  <vscale x 2 x double>*,
2620  <vscale x 2 x i32>,
2621  <vscale x 2 x i1>,
2622  i32);
2623
2624define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
2625; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
2626; CHECK:       # %bb.0: # %entry
2627; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
2628; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
2629; CHECK-NEXT:    ret
2630entry:
2631  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
2632    <vscale x 2 x double> %0,
2633    <vscale x 2 x double>* %1,
2634    <vscale x 2 x i32> %2,
2635    <vscale x 2 x i1> %3,
2636    i32 %4)
2637
2638  ret <vscale x 2 x double> %a
2639}
2640
2641declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
2642  <vscale x 4 x double>*,
2643  <vscale x 4 x i32>,
2644  i32);
2645
2646define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
2647; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32:
2648; CHECK:       # %bb.0: # %entry
2649; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
2650; CHECK-NEXT:    vluxei32.v v28, (a0), v8
2651; CHECK-NEXT:    vmv4r.v v8, v28
2652; CHECK-NEXT:    ret
2653entry:
2654  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
2655    <vscale x 4 x double>* %0,
2656    <vscale x 4 x i32> %1,
2657    i32 %2)
2658
2659  ret <vscale x 4 x double> %a
2660}
2661
2662declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
2663  <vscale x 4 x double>,
2664  <vscale x 4 x double>*,
2665  <vscale x 4 x i32>,
2666  <vscale x 4 x i1>,
2667  i32);
2668
2669define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
2670; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
2671; CHECK:       # %bb.0: # %entry
2672; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
2673; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
2674; CHECK-NEXT:    ret
2675entry:
2676  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
2677    <vscale x 4 x double> %0,
2678    <vscale x 4 x double>* %1,
2679    <vscale x 4 x i32> %2,
2680    <vscale x 4 x i1> %3,
2681    i32 %4)
2682
2683  ret <vscale x 4 x double> %a
2684}
2685
2686declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
2687  <vscale x 8 x double>*,
2688  <vscale x 8 x i32>,
2689  i32);
2690
2691define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
2692; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32:
2693; CHECK:       # %bb.0: # %entry
2694; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
2695; CHECK-NEXT:    vluxei32.v v16, (a0), v8
2696; CHECK-NEXT:    vmv8r.v v8, v16
2697; CHECK-NEXT:    ret
2698entry:
2699  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
2700    <vscale x 8 x double>* %0,
2701    <vscale x 8 x i32> %1,
2702    i32 %2)
2703
2704  ret <vscale x 8 x double> %a
2705}
2706
2707declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
2708  <vscale x 8 x double>,
2709  <vscale x 8 x double>*,
2710  <vscale x 8 x i32>,
2711  <vscale x 8 x i1>,
2712  i32);
2713
2714define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
2715; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
2716; CHECK:       # %bb.0: # %entry
2717; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
2718; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
2719; CHECK-NEXT:    ret
2720entry:
2721  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
2722    <vscale x 8 x double> %0,
2723    <vscale x 8 x double>* %1,
2724    <vscale x 8 x i32> %2,
2725    <vscale x 8 x i1> %3,
2726    i32 %4)
2727
2728  ret <vscale x 8 x double> %a
2729}
2730
2731declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
2732  <vscale x 1 x i8>*,
2733  <vscale x 1 x i16>,
2734  i32);
2735
2736define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
2737; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16:
2738; CHECK:       # %bb.0: # %entry
2739; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
2740; CHECK-NEXT:    vluxei16.v v25, (a0), v8
2741; CHECK-NEXT:    vmv1r.v v8, v25
2742; CHECK-NEXT:    ret
2743entry:
2744  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
2745    <vscale x 1 x i8>* %0,
2746    <vscale x 1 x i16> %1,
2747    i32 %2)
2748
2749  ret <vscale x 1 x i8> %a
2750}
2751
2752declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
2753  <vscale x 1 x i8>,
2754  <vscale x 1 x i8>*,
2755  <vscale x 1 x i16>,
2756  <vscale x 1 x i1>,
2757  i32);
2758
2759define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
2760; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
2761; CHECK:       # %bb.0: # %entry
2762; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
2763; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
2764; CHECK-NEXT:    ret
2765entry:
2766  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
2767    <vscale x 1 x i8> %0,
2768    <vscale x 1 x i8>* %1,
2769    <vscale x 1 x i16> %2,
2770    <vscale x 1 x i1> %3,
2771    i32 %4)
2772
2773  ret <vscale x 1 x i8> %a
2774}
2775
2776declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
2777  <vscale x 2 x i8>*,
2778  <vscale x 2 x i16>,
2779  i32);
2780
2781define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
2782; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16:
2783; CHECK:       # %bb.0: # %entry
2784; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
2785; CHECK-NEXT:    vluxei16.v v25, (a0), v8
2786; CHECK-NEXT:    vmv1r.v v8, v25
2787; CHECK-NEXT:    ret
2788entry:
2789  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
2790    <vscale x 2 x i8>* %0,
2791    <vscale x 2 x i16> %1,
2792    i32 %2)
2793
2794  ret <vscale x 2 x i8> %a
2795}
2796
2797declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
2798  <vscale x 2 x i8>,
2799  <vscale x 2 x i8>*,
2800  <vscale x 2 x i16>,
2801  <vscale x 2 x i1>,
2802  i32);
2803
2804define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
2805; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
2806; CHECK:       # %bb.0: # %entry
2807; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
2808; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
2809; CHECK-NEXT:    ret
2810entry:
2811  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
2812    <vscale x 2 x i8> %0,
2813    <vscale x 2 x i8>* %1,
2814    <vscale x 2 x i16> %2,
2815    <vscale x 2 x i1> %3,
2816    i32 %4)
2817
2818  ret <vscale x 2 x i8> %a
2819}
2820
2821declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
2822  <vscale x 4 x i8>*,
2823  <vscale x 4 x i16>,
2824  i32);
2825
2826define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
2827; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16:
2828; CHECK:       # %bb.0: # %entry
2829; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
2830; CHECK-NEXT:    vluxei16.v v25, (a0), v8
2831; CHECK-NEXT:    vmv1r.v v8, v25
2832; CHECK-NEXT:    ret
2833entry:
2834  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
2835    <vscale x 4 x i8>* %0,
2836    <vscale x 4 x i16> %1,
2837    i32 %2)
2838
2839  ret <vscale x 4 x i8> %a
2840}
2841
2842declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
2843  <vscale x 4 x i8>,
2844  <vscale x 4 x i8>*,
2845  <vscale x 4 x i16>,
2846  <vscale x 4 x i1>,
2847  i32);
2848
2849define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
2850; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
2851; CHECK:       # %bb.0: # %entry
2852; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
2853; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
2854; CHECK-NEXT:    ret
2855entry:
2856  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
2857    <vscale x 4 x i8> %0,
2858    <vscale x 4 x i8>* %1,
2859    <vscale x 4 x i16> %2,
2860    <vscale x 4 x i1> %3,
2861    i32 %4)
2862
2863  ret <vscale x 4 x i8> %a
2864}
2865
2866declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
2867  <vscale x 8 x i8>*,
2868  <vscale x 8 x i16>,
2869  i32);
2870
2871define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
2872; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16:
2873; CHECK:       # %bb.0: # %entry
2874; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
2875; CHECK-NEXT:    vluxei16.v v25, (a0), v8
2876; CHECK-NEXT:    vmv1r.v v8, v25
2877; CHECK-NEXT:    ret
2878entry:
2879  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
2880    <vscale x 8 x i8>* %0,
2881    <vscale x 8 x i16> %1,
2882    i32 %2)
2883
2884  ret <vscale x 8 x i8> %a
2885}
2886
2887declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
2888  <vscale x 8 x i8>,
2889  <vscale x 8 x i8>*,
2890  <vscale x 8 x i16>,
2891  <vscale x 8 x i1>,
2892  i32);
2893
2894define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
2895; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
2896; CHECK:       # %bb.0: # %entry
2897; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
2898; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
2899; CHECK-NEXT:    ret
2900entry:
2901  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
2902    <vscale x 8 x i8> %0,
2903    <vscale x 8 x i8>* %1,
2904    <vscale x 8 x i16> %2,
2905    <vscale x 8 x i1> %3,
2906    i32 %4)
2907
2908  ret <vscale x 8 x i8> %a
2909}
2910
2911declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
2912  <vscale x 16 x i8>*,
2913  <vscale x 16 x i16>,
2914  i32);
2915
2916define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
2917; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16:
2918; CHECK:       # %bb.0: # %entry
2919; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
2920; CHECK-NEXT:    vluxei16.v v26, (a0), v8
2921; CHECK-NEXT:    vmv2r.v v8, v26
2922; CHECK-NEXT:    ret
2923entry:
2924  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
2925    <vscale x 16 x i8>* %0,
2926    <vscale x 16 x i16> %1,
2927    i32 %2)
2928
2929  ret <vscale x 16 x i8> %a
2930}
2931
2932declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
2933  <vscale x 16 x i8>,
2934  <vscale x 16 x i8>*,
2935  <vscale x 16 x i16>,
2936  <vscale x 16 x i1>,
2937  i32);
2938
2939define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
2940; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
2941; CHECK:       # %bb.0: # %entry
2942; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
2943; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
2944; CHECK-NEXT:    ret
2945entry:
2946  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
2947    <vscale x 16 x i8> %0,
2948    <vscale x 16 x i8>* %1,
2949    <vscale x 16 x i16> %2,
2950    <vscale x 16 x i1> %3,
2951    i32 %4)
2952
2953  ret <vscale x 16 x i8> %a
2954}
2955
2956declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
2957  <vscale x 32 x i8>*,
2958  <vscale x 32 x i16>,
2959  i32);
2960
2961define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
2962; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16:
2963; CHECK:       # %bb.0: # %entry
2964; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
2965; CHECK-NEXT:    vluxei16.v v28, (a0), v8
2966; CHECK-NEXT:    vmv4r.v v8, v28
2967; CHECK-NEXT:    ret
2968entry:
2969  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
2970    <vscale x 32 x i8>* %0,
2971    <vscale x 32 x i16> %1,
2972    i32 %2)
2973
2974  ret <vscale x 32 x i8> %a
2975}
2976
2977declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
2978  <vscale x 32 x i8>,
2979  <vscale x 32 x i8>*,
2980  <vscale x 32 x i16>,
2981  <vscale x 32 x i1>,
2982  i32);
2983
2984define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
2985; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
2986; CHECK:       # %bb.0: # %entry
2987; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
2988; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
2989; CHECK-NEXT:    ret
2990entry:
2991  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
2992    <vscale x 32 x i8> %0,
2993    <vscale x 32 x i8>* %1,
2994    <vscale x 32 x i16> %2,
2995    <vscale x 32 x i1> %3,
2996    i32 %4)
2997
2998  ret <vscale x 32 x i8> %a
2999}
3000
3001declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
3002  <vscale x 1 x i16>*,
3003  <vscale x 1 x i16>,
3004  i32);
3005
3006define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
3007; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16:
3008; CHECK:       # %bb.0: # %entry
3009; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
3010; CHECK-NEXT:    vluxei16.v v8, (a0), v8
3011; CHECK-NEXT:    ret
3012entry:
3013  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
3014    <vscale x 1 x i16>* %0,
3015    <vscale x 1 x i16> %1,
3016    i32 %2)
3017
3018  ret <vscale x 1 x i16> %a
3019}
3020
3021declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
3022  <vscale x 1 x i16>,
3023  <vscale x 1 x i16>*,
3024  <vscale x 1 x i16>,
3025  <vscale x 1 x i1>,
3026  i32);
3027
3028define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
3029; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
3030; CHECK:       # %bb.0: # %entry
3031; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
3032; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
3033; CHECK-NEXT:    ret
3034entry:
3035  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
3036    <vscale x 1 x i16> %0,
3037    <vscale x 1 x i16>* %1,
3038    <vscale x 1 x i16> %2,
3039    <vscale x 1 x i1> %3,
3040    i32 %4)
3041
3042  ret <vscale x 1 x i16> %a
3043}
3044
3045declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
3046  <vscale x 2 x i16>*,
3047  <vscale x 2 x i16>,
3048  i32);
3049
3050define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
3051; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16:
3052; CHECK:       # %bb.0: # %entry
3053; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
3054; CHECK-NEXT:    vluxei16.v v8, (a0), v8
3055; CHECK-NEXT:    ret
3056entry:
3057  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
3058    <vscale x 2 x i16>* %0,
3059    <vscale x 2 x i16> %1,
3060    i32 %2)
3061
3062  ret <vscale x 2 x i16> %a
3063}
3064
3065declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
3066  <vscale x 2 x i16>,
3067  <vscale x 2 x i16>*,
3068  <vscale x 2 x i16>,
3069  <vscale x 2 x i1>,
3070  i32);
3071
3072define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
3073; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
3074; CHECK:       # %bb.0: # %entry
3075; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
3076; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
3077; CHECK-NEXT:    ret
3078entry:
3079  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
3080    <vscale x 2 x i16> %0,
3081    <vscale x 2 x i16>* %1,
3082    <vscale x 2 x i16> %2,
3083    <vscale x 2 x i1> %3,
3084    i32 %4)
3085
3086  ret <vscale x 2 x i16> %a
3087}
3088
3089declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
3090  <vscale x 4 x i16>*,
3091  <vscale x 4 x i16>,
3092  i32);
3093
3094define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
3095; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16:
3096; CHECK:       # %bb.0: # %entry
3097; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
3098; CHECK-NEXT:    vluxei16.v v8, (a0), v8
3099; CHECK-NEXT:    ret
3100entry:
3101  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
3102    <vscale x 4 x i16>* %0,
3103    <vscale x 4 x i16> %1,
3104    i32 %2)
3105
3106  ret <vscale x 4 x i16> %a
3107}
3108
3109declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
3110  <vscale x 4 x i16>,
3111  <vscale x 4 x i16>*,
3112  <vscale x 4 x i16>,
3113  <vscale x 4 x i1>,
3114  i32);
3115
3116define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
3117; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
3118; CHECK:       # %bb.0: # %entry
3119; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
3120; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
3121; CHECK-NEXT:    ret
3122entry:
3123  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
3124    <vscale x 4 x i16> %0,
3125    <vscale x 4 x i16>* %1,
3126    <vscale x 4 x i16> %2,
3127    <vscale x 4 x i1> %3,
3128    i32 %4)
3129
3130  ret <vscale x 4 x i16> %a
3131}
3132
3133declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
3134  <vscale x 8 x i16>*,
3135  <vscale x 8 x i16>,
3136  i32);
3137
3138define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
3139; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16:
3140; CHECK:       # %bb.0: # %entry
3141; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
3142; CHECK-NEXT:    vluxei16.v v8, (a0), v8
3143; CHECK-NEXT:    ret
3144entry:
3145  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
3146    <vscale x 8 x i16>* %0,
3147    <vscale x 8 x i16> %1,
3148    i32 %2)
3149
3150  ret <vscale x 8 x i16> %a
3151}
3152
3153declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
3154  <vscale x 8 x i16>,
3155  <vscale x 8 x i16>*,
3156  <vscale x 8 x i16>,
3157  <vscale x 8 x i1>,
3158  i32);
3159
3160define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
3161; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
3162; CHECK:       # %bb.0: # %entry
3163; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
3164; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
3165; CHECK-NEXT:    ret
3166entry:
3167  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
3168    <vscale x 8 x i16> %0,
3169    <vscale x 8 x i16>* %1,
3170    <vscale x 8 x i16> %2,
3171    <vscale x 8 x i1> %3,
3172    i32 %4)
3173
3174  ret <vscale x 8 x i16> %a
3175}
3176
3177declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
3178  <vscale x 16 x i16>*,
3179  <vscale x 16 x i16>,
3180  i32);
3181
3182define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
3183; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16:
3184; CHECK:       # %bb.0: # %entry
3185; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
3186; CHECK-NEXT:    vluxei16.v v8, (a0), v8
3187; CHECK-NEXT:    ret
3188entry:
3189  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
3190    <vscale x 16 x i16>* %0,
3191    <vscale x 16 x i16> %1,
3192    i32 %2)
3193
3194  ret <vscale x 16 x i16> %a
3195}
3196
3197declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
3198  <vscale x 16 x i16>,
3199  <vscale x 16 x i16>*,
3200  <vscale x 16 x i16>,
3201  <vscale x 16 x i1>,
3202  i32);
3203
3204define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
3205; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
3206; CHECK:       # %bb.0: # %entry
3207; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
3208; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
3209; CHECK-NEXT:    ret
3210entry:
3211  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
3212    <vscale x 16 x i16> %0,
3213    <vscale x 16 x i16>* %1,
3214    <vscale x 16 x i16> %2,
3215    <vscale x 16 x i1> %3,
3216    i32 %4)
3217
3218  ret <vscale x 16 x i16> %a
3219}
3220
3221declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
3222  <vscale x 32 x i16>*,
3223  <vscale x 32 x i16>,
3224  i32);
3225
3226define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
3227; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16:
3228; CHECK:       # %bb.0: # %entry
3229; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
3230; CHECK-NEXT:    vluxei16.v v8, (a0), v8
3231; CHECK-NEXT:    ret
3232entry:
3233  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
3234    <vscale x 32 x i16>* %0,
3235    <vscale x 32 x i16> %1,
3236    i32 %2)
3237
3238  ret <vscale x 32 x i16> %a
3239}
3240
3241declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
3242  <vscale x 32 x i16>,
3243  <vscale x 32 x i16>*,
3244  <vscale x 32 x i16>,
3245  <vscale x 32 x i1>,
3246  i32);
3247
3248define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
3249; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
3250; CHECK:       # %bb.0: # %entry
3251; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
3252; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
3253; CHECK-NEXT:    ret
3254entry:
3255  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
3256    <vscale x 32 x i16> %0,
3257    <vscale x 32 x i16>* %1,
3258    <vscale x 32 x i16> %2,
3259    <vscale x 32 x i1> %3,
3260    i32 %4)
3261
3262  ret <vscale x 32 x i16> %a
3263}
3264
3265declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
3266  <vscale x 1 x i32>*,
3267  <vscale x 1 x i16>,
3268  i32);
3269
3270define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
3271; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16:
3272; CHECK:       # %bb.0: # %entry
3273; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
3274; CHECK-NEXT:    vluxei16.v v25, (a0), v8
3275; CHECK-NEXT:    vmv1r.v v8, v25
3276; CHECK-NEXT:    ret
3277entry:
3278  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
3279    <vscale x 1 x i32>* %0,
3280    <vscale x 1 x i16> %1,
3281    i32 %2)
3282
3283  ret <vscale x 1 x i32> %a
3284}
3285
3286declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
3287  <vscale x 1 x i32>,
3288  <vscale x 1 x i32>*,
3289  <vscale x 1 x i16>,
3290  <vscale x 1 x i1>,
3291  i32);
3292
3293define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
3294; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
3295; CHECK:       # %bb.0: # %entry
3296; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
3297; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
3298; CHECK-NEXT:    ret
3299entry:
3300  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
3301    <vscale x 1 x i32> %0,
3302    <vscale x 1 x i32>* %1,
3303    <vscale x 1 x i16> %2,
3304    <vscale x 1 x i1> %3,
3305    i32 %4)
3306
3307  ret <vscale x 1 x i32> %a
3308}
3309
3310declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
3311  <vscale x 2 x i32>*,
3312  <vscale x 2 x i16>,
3313  i32);
3314
3315define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
3316; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16:
3317; CHECK:       # %bb.0: # %entry
3318; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
3319; CHECK-NEXT:    vluxei16.v v25, (a0), v8
3320; CHECK-NEXT:    vmv1r.v v8, v25
3321; CHECK-NEXT:    ret
3322entry:
3323  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
3324    <vscale x 2 x i32>* %0,
3325    <vscale x 2 x i16> %1,
3326    i32 %2)
3327
3328  ret <vscale x 2 x i32> %a
3329}
3330
3331declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
3332  <vscale x 2 x i32>,
3333  <vscale x 2 x i32>*,
3334  <vscale x 2 x i16>,
3335  <vscale x 2 x i1>,
3336  i32);
3337
3338define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
3339; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
3340; CHECK:       # %bb.0: # %entry
3341; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
3342; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
3343; CHECK-NEXT:    ret
3344entry:
3345  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
3346    <vscale x 2 x i32> %0,
3347    <vscale x 2 x i32>* %1,
3348    <vscale x 2 x i16> %2,
3349    <vscale x 2 x i1> %3,
3350    i32 %4)
3351
3352  ret <vscale x 2 x i32> %a
3353}
3354
3355declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
3356  <vscale x 4 x i32>*,
3357  <vscale x 4 x i16>,
3358  i32);
3359
3360define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
3361; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16:
3362; CHECK:       # %bb.0: # %entry
3363; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
3364; CHECK-NEXT:    vluxei16.v v26, (a0), v8
3365; CHECK-NEXT:    vmv2r.v v8, v26
3366; CHECK-NEXT:    ret
3367entry:
3368  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
3369    <vscale x 4 x i32>* %0,
3370    <vscale x 4 x i16> %1,
3371    i32 %2)
3372
3373  ret <vscale x 4 x i32> %a
3374}
3375
3376declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
3377  <vscale x 4 x i32>,
3378  <vscale x 4 x i32>*,
3379  <vscale x 4 x i16>,
3380  <vscale x 4 x i1>,
3381  i32);
3382
3383define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
3384; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
3385; CHECK:       # %bb.0: # %entry
3386; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
3387; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
3388; CHECK-NEXT:    ret
3389entry:
3390  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
3391    <vscale x 4 x i32> %0,
3392    <vscale x 4 x i32>* %1,
3393    <vscale x 4 x i16> %2,
3394    <vscale x 4 x i1> %3,
3395    i32 %4)
3396
3397  ret <vscale x 4 x i32> %a
3398}
3399
3400declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
3401  <vscale x 8 x i32>*,
3402  <vscale x 8 x i16>,
3403  i32);
3404
3405define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
3406; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16:
3407; CHECK:       # %bb.0: # %entry
3408; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
3409; CHECK-NEXT:    vluxei16.v v28, (a0), v8
3410; CHECK-NEXT:    vmv4r.v v8, v28
3411; CHECK-NEXT:    ret
3412entry:
3413  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
3414    <vscale x 8 x i32>* %0,
3415    <vscale x 8 x i16> %1,
3416    i32 %2)
3417
3418  ret <vscale x 8 x i32> %a
3419}
3420
3421declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
3422  <vscale x 8 x i32>,
3423  <vscale x 8 x i32>*,
3424  <vscale x 8 x i16>,
3425  <vscale x 8 x i1>,
3426  i32);
3427
3428define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
3429; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
3430; CHECK:       # %bb.0: # %entry
3431; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
3432; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
3433; CHECK-NEXT:    ret
3434entry:
3435  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
3436    <vscale x 8 x i32> %0,
3437    <vscale x 8 x i32>* %1,
3438    <vscale x 8 x i16> %2,
3439    <vscale x 8 x i1> %3,
3440    i32 %4)
3441
3442  ret <vscale x 8 x i32> %a
3443}
3444
3445declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
3446  <vscale x 16 x i32>*,
3447  <vscale x 16 x i16>,
3448  i32);
3449
3450define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
3451; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16:
3452; CHECK:       # %bb.0: # %entry
3453; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
3454; CHECK-NEXT:    vluxei16.v v16, (a0), v8
3455; CHECK-NEXT:    vmv8r.v v8, v16
3456; CHECK-NEXT:    ret
3457entry:
3458  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
3459    <vscale x 16 x i32>* %0,
3460    <vscale x 16 x i16> %1,
3461    i32 %2)
3462
3463  ret <vscale x 16 x i32> %a
3464}
3465
3466declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
3467  <vscale x 16 x i32>,
3468  <vscale x 16 x i32>*,
3469  <vscale x 16 x i16>,
3470  <vscale x 16 x i1>,
3471  i32);
3472
3473define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
3474; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
3475; CHECK:       # %bb.0: # %entry
3476; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
3477; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
3478; CHECK-NEXT:    ret
3479entry:
3480  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
3481    <vscale x 16 x i32> %0,
3482    <vscale x 16 x i32>* %1,
3483    <vscale x 16 x i16> %2,
3484    <vscale x 16 x i1> %3,
3485    i32 %4)
3486
3487  ret <vscale x 16 x i32> %a
3488}
3489
3490declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
3491  <vscale x 1 x i64>*,
3492  <vscale x 1 x i16>,
3493  i32);
3494
3495define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
3496; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16:
3497; CHECK:       # %bb.0: # %entry
3498; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
3499; CHECK-NEXT:    vluxei16.v v25, (a0), v8
3500; CHECK-NEXT:    vmv1r.v v8, v25
3501; CHECK-NEXT:    ret
3502entry:
3503  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
3504    <vscale x 1 x i64>* %0,
3505    <vscale x 1 x i16> %1,
3506    i32 %2)
3507
3508  ret <vscale x 1 x i64> %a
3509}
3510
3511declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
3512  <vscale x 1 x i64>,
3513  <vscale x 1 x i64>*,
3514  <vscale x 1 x i16>,
3515  <vscale x 1 x i1>,
3516  i32);
3517
3518define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
3519; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
3520; CHECK:       # %bb.0: # %entry
3521; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
3522; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
3523; CHECK-NEXT:    ret
3524entry:
3525  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
3526    <vscale x 1 x i64> %0,
3527    <vscale x 1 x i64>* %1,
3528    <vscale x 1 x i16> %2,
3529    <vscale x 1 x i1> %3,
3530    i32 %4)
3531
3532  ret <vscale x 1 x i64> %a
3533}
3534
3535declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
3536  <vscale x 2 x i64>*,
3537  <vscale x 2 x i16>,
3538  i32);
3539
3540define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
3541; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16:
3542; CHECK:       # %bb.0: # %entry
3543; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
3544; CHECK-NEXT:    vluxei16.v v26, (a0), v8
3545; CHECK-NEXT:    vmv2r.v v8, v26
3546; CHECK-NEXT:    ret
3547entry:
3548  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
3549    <vscale x 2 x i64>* %0,
3550    <vscale x 2 x i16> %1,
3551    i32 %2)
3552
3553  ret <vscale x 2 x i64> %a
3554}
3555
3556declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
3557  <vscale x 2 x i64>,
3558  <vscale x 2 x i64>*,
3559  <vscale x 2 x i16>,
3560  <vscale x 2 x i1>,
3561  i32);
3562
3563define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
3564; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
3565; CHECK:       # %bb.0: # %entry
3566; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
3567; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
3568; CHECK-NEXT:    ret
3569entry:
3570  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
3571    <vscale x 2 x i64> %0,
3572    <vscale x 2 x i64>* %1,
3573    <vscale x 2 x i16> %2,
3574    <vscale x 2 x i1> %3,
3575    i32 %4)
3576
3577  ret <vscale x 2 x i64> %a
3578}
3579
3580declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
3581  <vscale x 4 x i64>*,
3582  <vscale x 4 x i16>,
3583  i32);
3584
3585define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
3586; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16:
3587; CHECK:       # %bb.0: # %entry
3588; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
3589; CHECK-NEXT:    vluxei16.v v28, (a0), v8
3590; CHECK-NEXT:    vmv4r.v v8, v28
3591; CHECK-NEXT:    ret
3592entry:
3593  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
3594    <vscale x 4 x i64>* %0,
3595    <vscale x 4 x i16> %1,
3596    i32 %2)
3597
3598  ret <vscale x 4 x i64> %a
3599}
3600
3601declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
3602  <vscale x 4 x i64>,
3603  <vscale x 4 x i64>*,
3604  <vscale x 4 x i16>,
3605  <vscale x 4 x i1>,
3606  i32);
3607
3608define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
3609; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
3610; CHECK:       # %bb.0: # %entry
3611; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
3612; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
3613; CHECK-NEXT:    ret
3614entry:
3615  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
3616    <vscale x 4 x i64> %0,
3617    <vscale x 4 x i64>* %1,
3618    <vscale x 4 x i16> %2,
3619    <vscale x 4 x i1> %3,
3620    i32 %4)
3621
3622  ret <vscale x 4 x i64> %a
3623}
3624
3625declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
3626  <vscale x 8 x i64>*,
3627  <vscale x 8 x i16>,
3628  i32);
3629
3630define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
3631; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16:
3632; CHECK:       # %bb.0: # %entry
3633; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
3634; CHECK-NEXT:    vluxei16.v v16, (a0), v8
3635; CHECK-NEXT:    vmv8r.v v8, v16
3636; CHECK-NEXT:    ret
3637entry:
3638  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
3639    <vscale x 8 x i64>* %0,
3640    <vscale x 8 x i16> %1,
3641    i32 %2)
3642
3643  ret <vscale x 8 x i64> %a
3644}
3645
3646declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
3647  <vscale x 8 x i64>,
3648  <vscale x 8 x i64>*,
3649  <vscale x 8 x i16>,
3650  <vscale x 8 x i1>,
3651  i32);
3652
3653define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
3654; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
3655; CHECK:       # %bb.0: # %entry
3656; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
3657; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
3658; CHECK-NEXT:    ret
3659entry:
3660  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
3661    <vscale x 8 x i64> %0,
3662    <vscale x 8 x i64>* %1,
3663    <vscale x 8 x i16> %2,
3664    <vscale x 8 x i1> %3,
3665    i32 %4)
3666
3667  ret <vscale x 8 x i64> %a
3668}
3669
3670declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
3671  <vscale x 1 x half>*,
3672  <vscale x 1 x i16>,
3673  i32);
3674
3675define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
3676; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16:
3677; CHECK:       # %bb.0: # %entry
3678; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
3679; CHECK-NEXT:    vluxei16.v v8, (a0), v8
3680; CHECK-NEXT:    ret
3681entry:
3682  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
3683    <vscale x 1 x half>* %0,
3684    <vscale x 1 x i16> %1,
3685    i32 %2)
3686
3687  ret <vscale x 1 x half> %a
3688}
3689
3690declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
3691  <vscale x 1 x half>,
3692  <vscale x 1 x half>*,
3693  <vscale x 1 x i16>,
3694  <vscale x 1 x i1>,
3695  i32);
3696
3697define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
3698; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
3699; CHECK:       # %bb.0: # %entry
3700; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
3701; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
3702; CHECK-NEXT:    ret
3703entry:
3704  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
3705    <vscale x 1 x half> %0,
3706    <vscale x 1 x half>* %1,
3707    <vscale x 1 x i16> %2,
3708    <vscale x 1 x i1> %3,
3709    i32 %4)
3710
3711  ret <vscale x 1 x half> %a
3712}
3713
3714declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
3715  <vscale x 2 x half>*,
3716  <vscale x 2 x i16>,
3717  i32);
3718
3719define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
3720; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16:
3721; CHECK:       # %bb.0: # %entry
3722; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
3723; CHECK-NEXT:    vluxei16.v v8, (a0), v8
3724; CHECK-NEXT:    ret
3725entry:
3726  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
3727    <vscale x 2 x half>* %0,
3728    <vscale x 2 x i16> %1,
3729    i32 %2)
3730
3731  ret <vscale x 2 x half> %a
3732}
3733
3734declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
3735  <vscale x 2 x half>,
3736  <vscale x 2 x half>*,
3737  <vscale x 2 x i16>,
3738  <vscale x 2 x i1>,
3739  i32);
3740
3741define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
3742; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
3743; CHECK:       # %bb.0: # %entry
3744; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
3745; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
3746; CHECK-NEXT:    ret
3747entry:
3748  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
3749    <vscale x 2 x half> %0,
3750    <vscale x 2 x half>* %1,
3751    <vscale x 2 x i16> %2,
3752    <vscale x 2 x i1> %3,
3753    i32 %4)
3754
3755  ret <vscale x 2 x half> %a
3756}
3757
3758declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
3759  <vscale x 4 x half>*,
3760  <vscale x 4 x i16>,
3761  i32);
3762
3763define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
3764; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16:
3765; CHECK:       # %bb.0: # %entry
3766; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
3767; CHECK-NEXT:    vluxei16.v v8, (a0), v8
3768; CHECK-NEXT:    ret
3769entry:
3770  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
3771    <vscale x 4 x half>* %0,
3772    <vscale x 4 x i16> %1,
3773    i32 %2)
3774
3775  ret <vscale x 4 x half> %a
3776}
3777
3778declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
3779  <vscale x 4 x half>,
3780  <vscale x 4 x half>*,
3781  <vscale x 4 x i16>,
3782  <vscale x 4 x i1>,
3783  i32);
3784
3785define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
3786; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
3787; CHECK:       # %bb.0: # %entry
3788; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
3789; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
3790; CHECK-NEXT:    ret
3791entry:
3792  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
3793    <vscale x 4 x half> %0,
3794    <vscale x 4 x half>* %1,
3795    <vscale x 4 x i16> %2,
3796    <vscale x 4 x i1> %3,
3797    i32 %4)
3798
3799  ret <vscale x 4 x half> %a
3800}
3801
3802declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
3803  <vscale x 8 x half>*,
3804  <vscale x 8 x i16>,
3805  i32);
3806
3807define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
3808; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16:
3809; CHECK:       # %bb.0: # %entry
3810; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
3811; CHECK-NEXT:    vluxei16.v v8, (a0), v8
3812; CHECK-NEXT:    ret
3813entry:
3814  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
3815    <vscale x 8 x half>* %0,
3816    <vscale x 8 x i16> %1,
3817    i32 %2)
3818
3819  ret <vscale x 8 x half> %a
3820}
3821
3822declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
3823  <vscale x 8 x half>,
3824  <vscale x 8 x half>*,
3825  <vscale x 8 x i16>,
3826  <vscale x 8 x i1>,
3827  i32);
3828
3829define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
3830; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
3831; CHECK:       # %bb.0: # %entry
3832; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
3833; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
3834; CHECK-NEXT:    ret
3835entry:
3836  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
3837    <vscale x 8 x half> %0,
3838    <vscale x 8 x half>* %1,
3839    <vscale x 8 x i16> %2,
3840    <vscale x 8 x i1> %3,
3841    i32 %4)
3842
3843  ret <vscale x 8 x half> %a
3844}
3845
3846declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
3847  <vscale x 16 x half>*,
3848  <vscale x 16 x i16>,
3849  i32);
3850
3851define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
3852; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16:
3853; CHECK:       # %bb.0: # %entry
3854; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
3855; CHECK-NEXT:    vluxei16.v v8, (a0), v8
3856; CHECK-NEXT:    ret
3857entry:
3858  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
3859    <vscale x 16 x half>* %0,
3860    <vscale x 16 x i16> %1,
3861    i32 %2)
3862
3863  ret <vscale x 16 x half> %a
3864}
3865
3866declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
3867  <vscale x 16 x half>,
3868  <vscale x 16 x half>*,
3869  <vscale x 16 x i16>,
3870  <vscale x 16 x i1>,
3871  i32);
3872
3873define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
3874; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
3875; CHECK:       # %bb.0: # %entry
3876; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
3877; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
3878; CHECK-NEXT:    ret
3879entry:
3880  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
3881    <vscale x 16 x half> %0,
3882    <vscale x 16 x half>* %1,
3883    <vscale x 16 x i16> %2,
3884    <vscale x 16 x i1> %3,
3885    i32 %4)
3886
3887  ret <vscale x 16 x half> %a
3888}
3889
3890declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
3891  <vscale x 32 x half>*,
3892  <vscale x 32 x i16>,
3893  i32);
3894
3895define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
3896; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16:
3897; CHECK:       # %bb.0: # %entry
3898; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
3899; CHECK-NEXT:    vluxei16.v v8, (a0), v8
3900; CHECK-NEXT:    ret
3901entry:
3902  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
3903    <vscale x 32 x half>* %0,
3904    <vscale x 32 x i16> %1,
3905    i32 %2)
3906
3907  ret <vscale x 32 x half> %a
3908}
3909
3910declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
3911  <vscale x 32 x half>,
3912  <vscale x 32 x half>*,
3913  <vscale x 32 x i16>,
3914  <vscale x 32 x i1>,
3915  i32);
3916
3917define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
3918; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
3919; CHECK:       # %bb.0: # %entry
3920; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
3921; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
3922; CHECK-NEXT:    ret
3923entry:
3924  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
3925    <vscale x 32 x half> %0,
3926    <vscale x 32 x half>* %1,
3927    <vscale x 32 x i16> %2,
3928    <vscale x 32 x i1> %3,
3929    i32 %4)
3930
3931  ret <vscale x 32 x half> %a
3932}
3933
3934declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
3935  <vscale x 1 x float>*,
3936  <vscale x 1 x i16>,
3937  i32);
3938
3939define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
3940; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16:
3941; CHECK:       # %bb.0: # %entry
3942; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
3943; CHECK-NEXT:    vluxei16.v v25, (a0), v8
3944; CHECK-NEXT:    vmv1r.v v8, v25
3945; CHECK-NEXT:    ret
3946entry:
3947  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
3948    <vscale x 1 x float>* %0,
3949    <vscale x 1 x i16> %1,
3950    i32 %2)
3951
3952  ret <vscale x 1 x float> %a
3953}
3954
3955declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
3956  <vscale x 1 x float>,
3957  <vscale x 1 x float>*,
3958  <vscale x 1 x i16>,
3959  <vscale x 1 x i1>,
3960  i32);
3961
3962define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
3963; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
3964; CHECK:       # %bb.0: # %entry
3965; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
3966; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
3967; CHECK-NEXT:    ret
3968entry:
3969  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
3970    <vscale x 1 x float> %0,
3971    <vscale x 1 x float>* %1,
3972    <vscale x 1 x i16> %2,
3973    <vscale x 1 x i1> %3,
3974    i32 %4)
3975
3976  ret <vscale x 1 x float> %a
3977}
3978
3979declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
3980  <vscale x 2 x float>*,
3981  <vscale x 2 x i16>,
3982  i32);
3983
3984define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
3985; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16:
3986; CHECK:       # %bb.0: # %entry
3987; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
3988; CHECK-NEXT:    vluxei16.v v25, (a0), v8
3989; CHECK-NEXT:    vmv1r.v v8, v25
3990; CHECK-NEXT:    ret
3991entry:
3992  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
3993    <vscale x 2 x float>* %0,
3994    <vscale x 2 x i16> %1,
3995    i32 %2)
3996
3997  ret <vscale x 2 x float> %a
3998}
3999
4000declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
4001  <vscale x 2 x float>,
4002  <vscale x 2 x float>*,
4003  <vscale x 2 x i16>,
4004  <vscale x 2 x i1>,
4005  i32);
4006
4007define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
4008; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
4009; CHECK:       # %bb.0: # %entry
4010; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
4011; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
4012; CHECK-NEXT:    ret
4013entry:
4014  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
4015    <vscale x 2 x float> %0,
4016    <vscale x 2 x float>* %1,
4017    <vscale x 2 x i16> %2,
4018    <vscale x 2 x i1> %3,
4019    i32 %4)
4020
4021  ret <vscale x 2 x float> %a
4022}
4023
4024declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
4025  <vscale x 4 x float>*,
4026  <vscale x 4 x i16>,
4027  i32);
4028
4029define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
4030; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16:
4031; CHECK:       # %bb.0: # %entry
4032; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
4033; CHECK-NEXT:    vluxei16.v v26, (a0), v8
4034; CHECK-NEXT:    vmv2r.v v8, v26
4035; CHECK-NEXT:    ret
4036entry:
4037  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
4038    <vscale x 4 x float>* %0,
4039    <vscale x 4 x i16> %1,
4040    i32 %2)
4041
4042  ret <vscale x 4 x float> %a
4043}
4044
4045declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
4046  <vscale x 4 x float>,
4047  <vscale x 4 x float>*,
4048  <vscale x 4 x i16>,
4049  <vscale x 4 x i1>,
4050  i32);
4051
4052define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
4053; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
4054; CHECK:       # %bb.0: # %entry
4055; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
4056; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
4057; CHECK-NEXT:    ret
4058entry:
4059  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
4060    <vscale x 4 x float> %0,
4061    <vscale x 4 x float>* %1,
4062    <vscale x 4 x i16> %2,
4063    <vscale x 4 x i1> %3,
4064    i32 %4)
4065
4066  ret <vscale x 4 x float> %a
4067}
4068
4069declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
4070  <vscale x 8 x float>*,
4071  <vscale x 8 x i16>,
4072  i32);
4073
4074define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
4075; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16:
4076; CHECK:       # %bb.0: # %entry
4077; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
4078; CHECK-NEXT:    vluxei16.v v28, (a0), v8
4079; CHECK-NEXT:    vmv4r.v v8, v28
4080; CHECK-NEXT:    ret
4081entry:
4082  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
4083    <vscale x 8 x float>* %0,
4084    <vscale x 8 x i16> %1,
4085    i32 %2)
4086
4087  ret <vscale x 8 x float> %a
4088}
4089
4090declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
4091  <vscale x 8 x float>,
4092  <vscale x 8 x float>*,
4093  <vscale x 8 x i16>,
4094  <vscale x 8 x i1>,
4095  i32);
4096
4097define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
4098; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
4099; CHECK:       # %bb.0: # %entry
4100; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
4101; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
4102; CHECK-NEXT:    ret
4103entry:
4104  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
4105    <vscale x 8 x float> %0,
4106    <vscale x 8 x float>* %1,
4107    <vscale x 8 x i16> %2,
4108    <vscale x 8 x i1> %3,
4109    i32 %4)
4110
4111  ret <vscale x 8 x float> %a
4112}
4113
4114declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
4115  <vscale x 16 x float>*,
4116  <vscale x 16 x i16>,
4117  i32);
4118
4119define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
4120; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16:
4121; CHECK:       # %bb.0: # %entry
4122; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
4123; CHECK-NEXT:    vluxei16.v v16, (a0), v8
4124; CHECK-NEXT:    vmv8r.v v8, v16
4125; CHECK-NEXT:    ret
4126entry:
4127  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
4128    <vscale x 16 x float>* %0,
4129    <vscale x 16 x i16> %1,
4130    i32 %2)
4131
4132  ret <vscale x 16 x float> %a
4133}
4134
4135declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
4136  <vscale x 16 x float>,
4137  <vscale x 16 x float>*,
4138  <vscale x 16 x i16>,
4139  <vscale x 16 x i1>,
4140  i32);
4141
4142define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
4143; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
4144; CHECK:       # %bb.0: # %entry
4145; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
4146; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
4147; CHECK-NEXT:    ret
4148entry:
4149  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
4150    <vscale x 16 x float> %0,
4151    <vscale x 16 x float>* %1,
4152    <vscale x 16 x i16> %2,
4153    <vscale x 16 x i1> %3,
4154    i32 %4)
4155
4156  ret <vscale x 16 x float> %a
4157}
4158
4159declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
4160  <vscale x 1 x double>*,
4161  <vscale x 1 x i16>,
4162  i32);
4163
4164define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
4165; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16:
4166; CHECK:       # %bb.0: # %entry
4167; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
4168; CHECK-NEXT:    vluxei16.v v25, (a0), v8
4169; CHECK-NEXT:    vmv1r.v v8, v25
4170; CHECK-NEXT:    ret
4171entry:
4172  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
4173    <vscale x 1 x double>* %0,
4174    <vscale x 1 x i16> %1,
4175    i32 %2)
4176
4177  ret <vscale x 1 x double> %a
4178}
4179
4180declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
4181  <vscale x 1 x double>,
4182  <vscale x 1 x double>*,
4183  <vscale x 1 x i16>,
4184  <vscale x 1 x i1>,
4185  i32);
4186
4187define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
4188; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
4189; CHECK:       # %bb.0: # %entry
4190; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
4191; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
4192; CHECK-NEXT:    ret
4193entry:
4194  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
4195    <vscale x 1 x double> %0,
4196    <vscale x 1 x double>* %1,
4197    <vscale x 1 x i16> %2,
4198    <vscale x 1 x i1> %3,
4199    i32 %4)
4200
4201  ret <vscale x 1 x double> %a
4202}
4203
4204declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
4205  <vscale x 2 x double>*,
4206  <vscale x 2 x i16>,
4207  i32);
4208
4209define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
4210; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16:
4211; CHECK:       # %bb.0: # %entry
4212; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
4213; CHECK-NEXT:    vluxei16.v v26, (a0), v8
4214; CHECK-NEXT:    vmv2r.v v8, v26
4215; CHECK-NEXT:    ret
4216entry:
4217  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
4218    <vscale x 2 x double>* %0,
4219    <vscale x 2 x i16> %1,
4220    i32 %2)
4221
4222  ret <vscale x 2 x double> %a
4223}
4224
4225declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
4226  <vscale x 2 x double>,
4227  <vscale x 2 x double>*,
4228  <vscale x 2 x i16>,
4229  <vscale x 2 x i1>,
4230  i32);
4231
4232define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
4233; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
4234; CHECK:       # %bb.0: # %entry
4235; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
4236; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
4237; CHECK-NEXT:    ret
4238entry:
4239  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
4240    <vscale x 2 x double> %0,
4241    <vscale x 2 x double>* %1,
4242    <vscale x 2 x i16> %2,
4243    <vscale x 2 x i1> %3,
4244    i32 %4)
4245
4246  ret <vscale x 2 x double> %a
4247}
4248
4249declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
4250  <vscale x 4 x double>*,
4251  <vscale x 4 x i16>,
4252  i32);
4253
4254define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
4255; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16:
4256; CHECK:       # %bb.0: # %entry
4257; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
4258; CHECK-NEXT:    vluxei16.v v28, (a0), v8
4259; CHECK-NEXT:    vmv4r.v v8, v28
4260; CHECK-NEXT:    ret
4261entry:
4262  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
4263    <vscale x 4 x double>* %0,
4264    <vscale x 4 x i16> %1,
4265    i32 %2)
4266
4267  ret <vscale x 4 x double> %a
4268}
4269
4270declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
4271  <vscale x 4 x double>,
4272  <vscale x 4 x double>*,
4273  <vscale x 4 x i16>,
4274  <vscale x 4 x i1>,
4275  i32);
4276
4277define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
4278; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
4279; CHECK:       # %bb.0: # %entry
4280; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
4281; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
4282; CHECK-NEXT:    ret
4283entry:
4284  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
4285    <vscale x 4 x double> %0,
4286    <vscale x 4 x double>* %1,
4287    <vscale x 4 x i16> %2,
4288    <vscale x 4 x i1> %3,
4289    i32 %4)
4290
4291  ret <vscale x 4 x double> %a
4292}
4293
4294declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
4295  <vscale x 8 x double>*,
4296  <vscale x 8 x i16>,
4297  i32);
4298
4299define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
4300; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16:
4301; CHECK:       # %bb.0: # %entry
4302; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
4303; CHECK-NEXT:    vluxei16.v v16, (a0), v8
4304; CHECK-NEXT:    vmv8r.v v8, v16
4305; CHECK-NEXT:    ret
4306entry:
4307  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
4308    <vscale x 8 x double>* %0,
4309    <vscale x 8 x i16> %1,
4310    i32 %2)
4311
4312  ret <vscale x 8 x double> %a
4313}
4314
4315declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
4316  <vscale x 8 x double>,
4317  <vscale x 8 x double>*,
4318  <vscale x 8 x i16>,
4319  <vscale x 8 x i1>,
4320  i32);
4321
4322define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
4323; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
4324; CHECK:       # %bb.0: # %entry
4325; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
4326; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
4327; CHECK-NEXT:    ret
4328entry:
4329  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
4330    <vscale x 8 x double> %0,
4331    <vscale x 8 x double>* %1,
4332    <vscale x 8 x i16> %2,
4333    <vscale x 8 x i1> %3,
4334    i32 %4)
4335
4336  ret <vscale x 8 x double> %a
4337}
4338
4339declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
4340  <vscale x 1 x i8>*,
4341  <vscale x 1 x i8>,
4342  i32);
4343
4344define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
4345; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8:
4346; CHECK:       # %bb.0: # %entry
4347; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
4348; CHECK-NEXT:    vluxei8.v v8, (a0), v8
4349; CHECK-NEXT:    ret
4350entry:
4351  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
4352    <vscale x 1 x i8>* %0,
4353    <vscale x 1 x i8> %1,
4354    i32 %2)
4355
4356  ret <vscale x 1 x i8> %a
4357}
4358
4359declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
4360  <vscale x 1 x i8>,
4361  <vscale x 1 x i8>*,
4362  <vscale x 1 x i8>,
4363  <vscale x 1 x i1>,
4364  i32);
4365
4366define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
4367; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
4368; CHECK:       # %bb.0: # %entry
4369; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
4370; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
4371; CHECK-NEXT:    ret
4372entry:
4373  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
4374    <vscale x 1 x i8> %0,
4375    <vscale x 1 x i8>* %1,
4376    <vscale x 1 x i8> %2,
4377    <vscale x 1 x i1> %3,
4378    i32 %4)
4379
4380  ret <vscale x 1 x i8> %a
4381}
4382
4383declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
4384  <vscale x 2 x i8>*,
4385  <vscale x 2 x i8>,
4386  i32);
4387
4388define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
4389; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8:
4390; CHECK:       # %bb.0: # %entry
4391; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
4392; CHECK-NEXT:    vluxei8.v v8, (a0), v8
4393; CHECK-NEXT:    ret
4394entry:
4395  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
4396    <vscale x 2 x i8>* %0,
4397    <vscale x 2 x i8> %1,
4398    i32 %2)
4399
4400  ret <vscale x 2 x i8> %a
4401}
4402
4403declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
4404  <vscale x 2 x i8>,
4405  <vscale x 2 x i8>*,
4406  <vscale x 2 x i8>,
4407  <vscale x 2 x i1>,
4408  i32);
4409
4410define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
4411; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
4412; CHECK:       # %bb.0: # %entry
4413; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
4414; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
4415; CHECK-NEXT:    ret
4416entry:
4417  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
4418    <vscale x 2 x i8> %0,
4419    <vscale x 2 x i8>* %1,
4420    <vscale x 2 x i8> %2,
4421    <vscale x 2 x i1> %3,
4422    i32 %4)
4423
4424  ret <vscale x 2 x i8> %a
4425}
4426
4427declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
4428  <vscale x 4 x i8>*,
4429  <vscale x 4 x i8>,
4430  i32);
4431
4432define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
4433; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8:
4434; CHECK:       # %bb.0: # %entry
4435; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
4436; CHECK-NEXT:    vluxei8.v v8, (a0), v8
4437; CHECK-NEXT:    ret
4438entry:
4439  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
4440    <vscale x 4 x i8>* %0,
4441    <vscale x 4 x i8> %1,
4442    i32 %2)
4443
4444  ret <vscale x 4 x i8> %a
4445}
4446
4447declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
4448  <vscale x 4 x i8>,
4449  <vscale x 4 x i8>*,
4450  <vscale x 4 x i8>,
4451  <vscale x 4 x i1>,
4452  i32);
4453
4454define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
4455; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
4456; CHECK:       # %bb.0: # %entry
4457; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
4458; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
4459; CHECK-NEXT:    ret
4460entry:
4461  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
4462    <vscale x 4 x i8> %0,
4463    <vscale x 4 x i8>* %1,
4464    <vscale x 4 x i8> %2,
4465    <vscale x 4 x i1> %3,
4466    i32 %4)
4467
4468  ret <vscale x 4 x i8> %a
4469}
4470
4471declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
4472  <vscale x 8 x i8>*,
4473  <vscale x 8 x i8>,
4474  i32);
4475
4476define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
4477; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8:
4478; CHECK:       # %bb.0: # %entry
4479; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
4480; CHECK-NEXT:    vluxei8.v v8, (a0), v8
4481; CHECK-NEXT:    ret
4482entry:
4483  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
4484    <vscale x 8 x i8>* %0,
4485    <vscale x 8 x i8> %1,
4486    i32 %2)
4487
4488  ret <vscale x 8 x i8> %a
4489}
4490
4491declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
4492  <vscale x 8 x i8>,
4493  <vscale x 8 x i8>*,
4494  <vscale x 8 x i8>,
4495  <vscale x 8 x i1>,
4496  i32);
4497
4498define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
4499; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
4500; CHECK:       # %bb.0: # %entry
4501; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
4502; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
4503; CHECK-NEXT:    ret
4504entry:
4505  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
4506    <vscale x 8 x i8> %0,
4507    <vscale x 8 x i8>* %1,
4508    <vscale x 8 x i8> %2,
4509    <vscale x 8 x i1> %3,
4510    i32 %4)
4511
4512  ret <vscale x 8 x i8> %a
4513}
4514
4515declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
4516  <vscale x 16 x i8>*,
4517  <vscale x 16 x i8>,
4518  i32);
4519
4520define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
4521; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8:
4522; CHECK:       # %bb.0: # %entry
4523; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
4524; CHECK-NEXT:    vluxei8.v v8, (a0), v8
4525; CHECK-NEXT:    ret
4526entry:
4527  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
4528    <vscale x 16 x i8>* %0,
4529    <vscale x 16 x i8> %1,
4530    i32 %2)
4531
4532  ret <vscale x 16 x i8> %a
4533}
4534
4535declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
4536  <vscale x 16 x i8>,
4537  <vscale x 16 x i8>*,
4538  <vscale x 16 x i8>,
4539  <vscale x 16 x i1>,
4540  i32);
4541
4542define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
4543; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
4544; CHECK:       # %bb.0: # %entry
4545; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
4546; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
4547; CHECK-NEXT:    ret
4548entry:
4549  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
4550    <vscale x 16 x i8> %0,
4551    <vscale x 16 x i8>* %1,
4552    <vscale x 16 x i8> %2,
4553    <vscale x 16 x i1> %3,
4554    i32 %4)
4555
4556  ret <vscale x 16 x i8> %a
4557}
4558
4559declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
4560  <vscale x 32 x i8>*,
4561  <vscale x 32 x i8>,
4562  i32);
4563
4564define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
4565; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8:
4566; CHECK:       # %bb.0: # %entry
4567; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
4568; CHECK-NEXT:    vluxei8.v v8, (a0), v8
4569; CHECK-NEXT:    ret
4570entry:
4571  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
4572    <vscale x 32 x i8>* %0,
4573    <vscale x 32 x i8> %1,
4574    i32 %2)
4575
4576  ret <vscale x 32 x i8> %a
4577}
4578
4579declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
4580  <vscale x 32 x i8>,
4581  <vscale x 32 x i8>*,
4582  <vscale x 32 x i8>,
4583  <vscale x 32 x i1>,
4584  i32);
4585
4586define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
4587; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
4588; CHECK:       # %bb.0: # %entry
4589; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
4590; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
4591; CHECK-NEXT:    ret
4592entry:
4593  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
4594    <vscale x 32 x i8> %0,
4595    <vscale x 32 x i8>* %1,
4596    <vscale x 32 x i8> %2,
4597    <vscale x 32 x i1> %3,
4598    i32 %4)
4599
4600  ret <vscale x 32 x i8> %a
4601}
4602
4603declare <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
4604  <vscale x 64 x i8>*,
4605  <vscale x 64 x i8>,
4606  i32);
4607
4608define <vscale x 64 x i8> @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
4609; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8:
4610; CHECK:       # %bb.0: # %entry
4611; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
4612; CHECK-NEXT:    vluxei8.v v8, (a0), v8
4613; CHECK-NEXT:    ret
4614entry:
4615  %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
4616    <vscale x 64 x i8>* %0,
4617    <vscale x 64 x i8> %1,
4618    i32 %2)
4619
4620  ret <vscale x 64 x i8> %a
4621}
4622
4623declare <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
4624  <vscale x 64 x i8>,
4625  <vscale x 64 x i8>*,
4626  <vscale x 64 x i8>,
4627  <vscale x 64 x i1>,
4628  i32);
4629
4630define <vscale x 64 x i8> @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
4631; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
4632; CHECK:       # %bb.0: # %entry
4633; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, mu
4634; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
4635; CHECK-NEXT:    ret
4636entry:
4637  %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
4638    <vscale x 64 x i8> %0,
4639    <vscale x 64 x i8>* %1,
4640    <vscale x 64 x i8> %2,
4641    <vscale x 64 x i1> %3,
4642    i32 %4)
4643
4644  ret <vscale x 64 x i8> %a
4645}
4646
4647declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
4648  <vscale x 1 x i16>*,
4649  <vscale x 1 x i8>,
4650  i32);
4651
4652define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
4653; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8:
4654; CHECK:       # %bb.0: # %entry
4655; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
4656; CHECK-NEXT:    vluxei8.v v25, (a0), v8
4657; CHECK-NEXT:    vmv1r.v v8, v25
4658; CHECK-NEXT:    ret
4659entry:
4660  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
4661    <vscale x 1 x i16>* %0,
4662    <vscale x 1 x i8> %1,
4663    i32 %2)
4664
4665  ret <vscale x 1 x i16> %a
4666}
4667
4668declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
4669  <vscale x 1 x i16>,
4670  <vscale x 1 x i16>*,
4671  <vscale x 1 x i8>,
4672  <vscale x 1 x i1>,
4673  i32);
4674
4675define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
4676; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
4677; CHECK:       # %bb.0: # %entry
4678; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
4679; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
4680; CHECK-NEXT:    ret
4681entry:
4682  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
4683    <vscale x 1 x i16> %0,
4684    <vscale x 1 x i16>* %1,
4685    <vscale x 1 x i8> %2,
4686    <vscale x 1 x i1> %3,
4687    i32 %4)
4688
4689  ret <vscale x 1 x i16> %a
4690}
4691
4692declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
4693  <vscale x 2 x i16>*,
4694  <vscale x 2 x i8>,
4695  i32);
4696
4697define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
4698; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8:
4699; CHECK:       # %bb.0: # %entry
4700; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
4701; CHECK-NEXT:    vluxei8.v v25, (a0), v8
4702; CHECK-NEXT:    vmv1r.v v8, v25
4703; CHECK-NEXT:    ret
4704entry:
4705  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
4706    <vscale x 2 x i16>* %0,
4707    <vscale x 2 x i8> %1,
4708    i32 %2)
4709
4710  ret <vscale x 2 x i16> %a
4711}
4712
4713declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
4714  <vscale x 2 x i16>,
4715  <vscale x 2 x i16>*,
4716  <vscale x 2 x i8>,
4717  <vscale x 2 x i1>,
4718  i32);
4719
4720define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
4721; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
4722; CHECK:       # %bb.0: # %entry
4723; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
4724; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
4725; CHECK-NEXT:    ret
4726entry:
4727  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
4728    <vscale x 2 x i16> %0,
4729    <vscale x 2 x i16>* %1,
4730    <vscale x 2 x i8> %2,
4731    <vscale x 2 x i1> %3,
4732    i32 %4)
4733
4734  ret <vscale x 2 x i16> %a
4735}
4736
4737declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
4738  <vscale x 4 x i16>*,
4739  <vscale x 4 x i8>,
4740  i32);
4741
4742define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
4743; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8:
4744; CHECK:       # %bb.0: # %entry
4745; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
4746; CHECK-NEXT:    vluxei8.v v25, (a0), v8
4747; CHECK-NEXT:    vmv1r.v v8, v25
4748; CHECK-NEXT:    ret
4749entry:
4750  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
4751    <vscale x 4 x i16>* %0,
4752    <vscale x 4 x i8> %1,
4753    i32 %2)
4754
4755  ret <vscale x 4 x i16> %a
4756}
4757
4758declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
4759  <vscale x 4 x i16>,
4760  <vscale x 4 x i16>*,
4761  <vscale x 4 x i8>,
4762  <vscale x 4 x i1>,
4763  i32);
4764
4765define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
4766; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
4767; CHECK:       # %bb.0: # %entry
4768; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
4769; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
4770; CHECK-NEXT:    ret
4771entry:
4772  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
4773    <vscale x 4 x i16> %0,
4774    <vscale x 4 x i16>* %1,
4775    <vscale x 4 x i8> %2,
4776    <vscale x 4 x i1> %3,
4777    i32 %4)
4778
4779  ret <vscale x 4 x i16> %a
4780}
4781
4782declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
4783  <vscale x 8 x i16>*,
4784  <vscale x 8 x i8>,
4785  i32);
4786
4787define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
4788; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8:
4789; CHECK:       # %bb.0: # %entry
4790; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
4791; CHECK-NEXT:    vluxei8.v v26, (a0), v8
4792; CHECK-NEXT:    vmv2r.v v8, v26
4793; CHECK-NEXT:    ret
4794entry:
4795  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
4796    <vscale x 8 x i16>* %0,
4797    <vscale x 8 x i8> %1,
4798    i32 %2)
4799
4800  ret <vscale x 8 x i16> %a
4801}
4802
4803declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
4804  <vscale x 8 x i16>,
4805  <vscale x 8 x i16>*,
4806  <vscale x 8 x i8>,
4807  <vscale x 8 x i1>,
4808  i32);
4809
4810define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
4811; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
4812; CHECK:       # %bb.0: # %entry
4813; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
4814; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
4815; CHECK-NEXT:    ret
4816entry:
4817  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
4818    <vscale x 8 x i16> %0,
4819    <vscale x 8 x i16>* %1,
4820    <vscale x 8 x i8> %2,
4821    <vscale x 8 x i1> %3,
4822    i32 %4)
4823
4824  ret <vscale x 8 x i16> %a
4825}
4826
4827declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
4828  <vscale x 16 x i16>*,
4829  <vscale x 16 x i8>,
4830  i32);
4831
4832define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
4833; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8:
4834; CHECK:       # %bb.0: # %entry
4835; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
4836; CHECK-NEXT:    vluxei8.v v28, (a0), v8
4837; CHECK-NEXT:    vmv4r.v v8, v28
4838; CHECK-NEXT:    ret
4839entry:
4840  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
4841    <vscale x 16 x i16>* %0,
4842    <vscale x 16 x i8> %1,
4843    i32 %2)
4844
4845  ret <vscale x 16 x i16> %a
4846}
4847
4848declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
4849  <vscale x 16 x i16>,
4850  <vscale x 16 x i16>*,
4851  <vscale x 16 x i8>,
4852  <vscale x 16 x i1>,
4853  i32);
4854
4855define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
4856; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
4857; CHECK:       # %bb.0: # %entry
4858; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
4859; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
4860; CHECK-NEXT:    ret
4861entry:
4862  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
4863    <vscale x 16 x i16> %0,
4864    <vscale x 16 x i16>* %1,
4865    <vscale x 16 x i8> %2,
4866    <vscale x 16 x i1> %3,
4867    i32 %4)
4868
4869  ret <vscale x 16 x i16> %a
4870}
4871
4872declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
4873  <vscale x 32 x i16>*,
4874  <vscale x 32 x i8>,
4875  i32);
4876
4877define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
4878; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8:
4879; CHECK:       # %bb.0: # %entry
4880; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
4881; CHECK-NEXT:    vluxei8.v v16, (a0), v8
4882; CHECK-NEXT:    vmv8r.v v8, v16
4883; CHECK-NEXT:    ret
4884entry:
4885  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
4886    <vscale x 32 x i16>* %0,
4887    <vscale x 32 x i8> %1,
4888    i32 %2)
4889
4890  ret <vscale x 32 x i16> %a
4891}
4892
4893declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
4894  <vscale x 32 x i16>,
4895  <vscale x 32 x i16>*,
4896  <vscale x 32 x i8>,
4897  <vscale x 32 x i1>,
4898  i32);
4899
4900define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
4901; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
4902; CHECK:       # %bb.0: # %entry
4903; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
4904; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
4905; CHECK-NEXT:    ret
4906entry:
4907  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
4908    <vscale x 32 x i16> %0,
4909    <vscale x 32 x i16>* %1,
4910    <vscale x 32 x i8> %2,
4911    <vscale x 32 x i1> %3,
4912    i32 %4)
4913
4914  ret <vscale x 32 x i16> %a
4915}
4916
4917declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
4918  <vscale x 1 x i32>*,
4919  <vscale x 1 x i8>,
4920  i32);
4921
4922define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
4923; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8:
4924; CHECK:       # %bb.0: # %entry
4925; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
4926; CHECK-NEXT:    vluxei8.v v25, (a0), v8
4927; CHECK-NEXT:    vmv1r.v v8, v25
4928; CHECK-NEXT:    ret
4929entry:
4930  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
4931    <vscale x 1 x i32>* %0,
4932    <vscale x 1 x i8> %1,
4933    i32 %2)
4934
4935  ret <vscale x 1 x i32> %a
4936}
4937
4938declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
4939  <vscale x 1 x i32>,
4940  <vscale x 1 x i32>*,
4941  <vscale x 1 x i8>,
4942  <vscale x 1 x i1>,
4943  i32);
4944
4945define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
4946; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
4947; CHECK:       # %bb.0: # %entry
4948; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
4949; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
4950; CHECK-NEXT:    ret
4951entry:
4952  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
4953    <vscale x 1 x i32> %0,
4954    <vscale x 1 x i32>* %1,
4955    <vscale x 1 x i8> %2,
4956    <vscale x 1 x i1> %3,
4957    i32 %4)
4958
4959  ret <vscale x 1 x i32> %a
4960}
4961
4962declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
4963  <vscale x 2 x i32>*,
4964  <vscale x 2 x i8>,
4965  i32);
4966
4967define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
4968; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8:
4969; CHECK:       # %bb.0: # %entry
4970; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
4971; CHECK-NEXT:    vluxei8.v v25, (a0), v8
4972; CHECK-NEXT:    vmv1r.v v8, v25
4973; CHECK-NEXT:    ret
4974entry:
4975  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
4976    <vscale x 2 x i32>* %0,
4977    <vscale x 2 x i8> %1,
4978    i32 %2)
4979
4980  ret <vscale x 2 x i32> %a
4981}
4982
4983declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
4984  <vscale x 2 x i32>,
4985  <vscale x 2 x i32>*,
4986  <vscale x 2 x i8>,
4987  <vscale x 2 x i1>,
4988  i32);
4989
4990define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
4991; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
4992; CHECK:       # %bb.0: # %entry
4993; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
4994; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
4995; CHECK-NEXT:    ret
4996entry:
4997  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
4998    <vscale x 2 x i32> %0,
4999    <vscale x 2 x i32>* %1,
5000    <vscale x 2 x i8> %2,
5001    <vscale x 2 x i1> %3,
5002    i32 %4)
5003
5004  ret <vscale x 2 x i32> %a
5005}
5006
5007declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
5008  <vscale x 4 x i32>*,
5009  <vscale x 4 x i8>,
5010  i32);
5011
5012define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
5013; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8:
5014; CHECK:       # %bb.0: # %entry
5015; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
5016; CHECK-NEXT:    vluxei8.v v26, (a0), v8
5017; CHECK-NEXT:    vmv2r.v v8, v26
5018; CHECK-NEXT:    ret
5019entry:
5020  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
5021    <vscale x 4 x i32>* %0,
5022    <vscale x 4 x i8> %1,
5023    i32 %2)
5024
5025  ret <vscale x 4 x i32> %a
5026}
5027
5028declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
5029  <vscale x 4 x i32>,
5030  <vscale x 4 x i32>*,
5031  <vscale x 4 x i8>,
5032  <vscale x 4 x i1>,
5033  i32);
5034
5035define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
5036; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
5037; CHECK:       # %bb.0: # %entry
5038; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
5039; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
5040; CHECK-NEXT:    ret
5041entry:
5042  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
5043    <vscale x 4 x i32> %0,
5044    <vscale x 4 x i32>* %1,
5045    <vscale x 4 x i8> %2,
5046    <vscale x 4 x i1> %3,
5047    i32 %4)
5048
5049  ret <vscale x 4 x i32> %a
5050}
5051
5052declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
5053  <vscale x 8 x i32>*,
5054  <vscale x 8 x i8>,
5055  i32);
5056
5057define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
5058; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8:
5059; CHECK:       # %bb.0: # %entry
5060; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
5061; CHECK-NEXT:    vluxei8.v v28, (a0), v8
5062; CHECK-NEXT:    vmv4r.v v8, v28
5063; CHECK-NEXT:    ret
5064entry:
5065  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
5066    <vscale x 8 x i32>* %0,
5067    <vscale x 8 x i8> %1,
5068    i32 %2)
5069
5070  ret <vscale x 8 x i32> %a
5071}
5072
5073declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
5074  <vscale x 8 x i32>,
5075  <vscale x 8 x i32>*,
5076  <vscale x 8 x i8>,
5077  <vscale x 8 x i1>,
5078  i32);
5079
5080define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
5081; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
5082; CHECK:       # %bb.0: # %entry
5083; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
5084; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
5085; CHECK-NEXT:    ret
5086entry:
5087  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
5088    <vscale x 8 x i32> %0,
5089    <vscale x 8 x i32>* %1,
5090    <vscale x 8 x i8> %2,
5091    <vscale x 8 x i1> %3,
5092    i32 %4)
5093
5094  ret <vscale x 8 x i32> %a
5095}
5096
5097declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
5098  <vscale x 16 x i32>*,
5099  <vscale x 16 x i8>,
5100  i32);
5101
5102define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
5103; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8:
5104; CHECK:       # %bb.0: # %entry
5105; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
5106; CHECK-NEXT:    vluxei8.v v16, (a0), v8
5107; CHECK-NEXT:    vmv8r.v v8, v16
5108; CHECK-NEXT:    ret
5109entry:
5110  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
5111    <vscale x 16 x i32>* %0,
5112    <vscale x 16 x i8> %1,
5113    i32 %2)
5114
5115  ret <vscale x 16 x i32> %a
5116}
5117
5118declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
5119  <vscale x 16 x i32>,
5120  <vscale x 16 x i32>*,
5121  <vscale x 16 x i8>,
5122  <vscale x 16 x i1>,
5123  i32);
5124
5125define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
5126; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
5127; CHECK:       # %bb.0: # %entry
5128; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
5129; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
5130; CHECK-NEXT:    ret
5131entry:
5132  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
5133    <vscale x 16 x i32> %0,
5134    <vscale x 16 x i32>* %1,
5135    <vscale x 16 x i8> %2,
5136    <vscale x 16 x i1> %3,
5137    i32 %4)
5138
5139  ret <vscale x 16 x i32> %a
5140}
5141
5142declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
5143  <vscale x 1 x i64>*,
5144  <vscale x 1 x i8>,
5145  i32);
5146
5147define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
5148; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8:
5149; CHECK:       # %bb.0: # %entry
5150; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
5151; CHECK-NEXT:    vluxei8.v v25, (a0), v8
5152; CHECK-NEXT:    vmv1r.v v8, v25
5153; CHECK-NEXT:    ret
5154entry:
5155  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
5156    <vscale x 1 x i64>* %0,
5157    <vscale x 1 x i8> %1,
5158    i32 %2)
5159
5160  ret <vscale x 1 x i64> %a
5161}
5162
5163declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
5164  <vscale x 1 x i64>,
5165  <vscale x 1 x i64>*,
5166  <vscale x 1 x i8>,
5167  <vscale x 1 x i1>,
5168  i32);
5169
5170define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
5171; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
5172; CHECK:       # %bb.0: # %entry
5173; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
5174; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
5175; CHECK-NEXT:    ret
5176entry:
5177  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
5178    <vscale x 1 x i64> %0,
5179    <vscale x 1 x i64>* %1,
5180    <vscale x 1 x i8> %2,
5181    <vscale x 1 x i1> %3,
5182    i32 %4)
5183
5184  ret <vscale x 1 x i64> %a
5185}
5186
5187declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
5188  <vscale x 2 x i64>*,
5189  <vscale x 2 x i8>,
5190  i32);
5191
5192define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
5193; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8:
5194; CHECK:       # %bb.0: # %entry
5195; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
5196; CHECK-NEXT:    vluxei8.v v26, (a0), v8
5197; CHECK-NEXT:    vmv2r.v v8, v26
5198; CHECK-NEXT:    ret
5199entry:
5200  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
5201    <vscale x 2 x i64>* %0,
5202    <vscale x 2 x i8> %1,
5203    i32 %2)
5204
5205  ret <vscale x 2 x i64> %a
5206}
5207
5208declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
5209  <vscale x 2 x i64>,
5210  <vscale x 2 x i64>*,
5211  <vscale x 2 x i8>,
5212  <vscale x 2 x i1>,
5213  i32);
5214
5215define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
5216; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
5217; CHECK:       # %bb.0: # %entry
5218; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
5219; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
5220; CHECK-NEXT:    ret
5221entry:
5222  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
5223    <vscale x 2 x i64> %0,
5224    <vscale x 2 x i64>* %1,
5225    <vscale x 2 x i8> %2,
5226    <vscale x 2 x i1> %3,
5227    i32 %4)
5228
5229  ret <vscale x 2 x i64> %a
5230}
5231
5232declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
5233  <vscale x 4 x i64>*,
5234  <vscale x 4 x i8>,
5235  i32);
5236
5237define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
5238; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8:
5239; CHECK:       # %bb.0: # %entry
5240; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
5241; CHECK-NEXT:    vluxei8.v v28, (a0), v8
5242; CHECK-NEXT:    vmv4r.v v8, v28
5243; CHECK-NEXT:    ret
5244entry:
5245  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
5246    <vscale x 4 x i64>* %0,
5247    <vscale x 4 x i8> %1,
5248    i32 %2)
5249
5250  ret <vscale x 4 x i64> %a
5251}
5252
5253declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
5254  <vscale x 4 x i64>,
5255  <vscale x 4 x i64>*,
5256  <vscale x 4 x i8>,
5257  <vscale x 4 x i1>,
5258  i32);
5259
5260define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
5261; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
5262; CHECK:       # %bb.0: # %entry
5263; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
5264; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
5265; CHECK-NEXT:    ret
5266entry:
5267  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
5268    <vscale x 4 x i64> %0,
5269    <vscale x 4 x i64>* %1,
5270    <vscale x 4 x i8> %2,
5271    <vscale x 4 x i1> %3,
5272    i32 %4)
5273
5274  ret <vscale x 4 x i64> %a
5275}
5276
5277declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
5278  <vscale x 8 x i64>*,
5279  <vscale x 8 x i8>,
5280  i32);
5281
5282define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
5283; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8:
5284; CHECK:       # %bb.0: # %entry
5285; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
5286; CHECK-NEXT:    vluxei8.v v16, (a0), v8
5287; CHECK-NEXT:    vmv8r.v v8, v16
5288; CHECK-NEXT:    ret
5289entry:
5290  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
5291    <vscale x 8 x i64>* %0,
5292    <vscale x 8 x i8> %1,
5293    i32 %2)
5294
5295  ret <vscale x 8 x i64> %a
5296}
5297
5298declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
5299  <vscale x 8 x i64>,
5300  <vscale x 8 x i64>*,
5301  <vscale x 8 x i8>,
5302  <vscale x 8 x i1>,
5303  i32);
5304
5305define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
5306; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
5307; CHECK:       # %bb.0: # %entry
5308; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
5309; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
5310; CHECK-NEXT:    ret
5311entry:
5312  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
5313    <vscale x 8 x i64> %0,
5314    <vscale x 8 x i64>* %1,
5315    <vscale x 8 x i8> %2,
5316    <vscale x 8 x i1> %3,
5317    i32 %4)
5318
5319  ret <vscale x 8 x i64> %a
5320}
5321
5322declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
5323  <vscale x 1 x half>*,
5324  <vscale x 1 x i8>,
5325  i32);
5326
5327define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
5328; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8:
5329; CHECK:       # %bb.0: # %entry
5330; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
5331; CHECK-NEXT:    vluxei8.v v25, (a0), v8
5332; CHECK-NEXT:    vmv1r.v v8, v25
5333; CHECK-NEXT:    ret
5334entry:
5335  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
5336    <vscale x 1 x half>* %0,
5337    <vscale x 1 x i8> %1,
5338    i32 %2)
5339
5340  ret <vscale x 1 x half> %a
5341}
5342
5343declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
5344  <vscale x 1 x half>,
5345  <vscale x 1 x half>*,
5346  <vscale x 1 x i8>,
5347  <vscale x 1 x i1>,
5348  i32);
5349
5350define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
5351; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
5352; CHECK:       # %bb.0: # %entry
5353; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
5354; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
5355; CHECK-NEXT:    ret
5356entry:
5357  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
5358    <vscale x 1 x half> %0,
5359    <vscale x 1 x half>* %1,
5360    <vscale x 1 x i8> %2,
5361    <vscale x 1 x i1> %3,
5362    i32 %4)
5363
5364  ret <vscale x 1 x half> %a
5365}
5366
5367declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
5368  <vscale x 2 x half>*,
5369  <vscale x 2 x i8>,
5370  i32);
5371
5372define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
5373; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8:
5374; CHECK:       # %bb.0: # %entry
5375; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
5376; CHECK-NEXT:    vluxei8.v v25, (a0), v8
5377; CHECK-NEXT:    vmv1r.v v8, v25
5378; CHECK-NEXT:    ret
5379entry:
5380  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
5381    <vscale x 2 x half>* %0,
5382    <vscale x 2 x i8> %1,
5383    i32 %2)
5384
5385  ret <vscale x 2 x half> %a
5386}
5387
5388declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
5389  <vscale x 2 x half>,
5390  <vscale x 2 x half>*,
5391  <vscale x 2 x i8>,
5392  <vscale x 2 x i1>,
5393  i32);
5394
5395define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
5396; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
5397; CHECK:       # %bb.0: # %entry
5398; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
5399; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
5400; CHECK-NEXT:    ret
5401entry:
5402  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
5403    <vscale x 2 x half> %0,
5404    <vscale x 2 x half>* %1,
5405    <vscale x 2 x i8> %2,
5406    <vscale x 2 x i1> %3,
5407    i32 %4)
5408
5409  ret <vscale x 2 x half> %a
5410}
5411
5412declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
5413  <vscale x 4 x half>*,
5414  <vscale x 4 x i8>,
5415  i32);
5416
5417define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
5418; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8:
5419; CHECK:       # %bb.0: # %entry
5420; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
5421; CHECK-NEXT:    vluxei8.v v25, (a0), v8
5422; CHECK-NEXT:    vmv1r.v v8, v25
5423; CHECK-NEXT:    ret
5424entry:
5425  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
5426    <vscale x 4 x half>* %0,
5427    <vscale x 4 x i8> %1,
5428    i32 %2)
5429
5430  ret <vscale x 4 x half> %a
5431}
5432
5433declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
5434  <vscale x 4 x half>,
5435  <vscale x 4 x half>*,
5436  <vscale x 4 x i8>,
5437  <vscale x 4 x i1>,
5438  i32);
5439
5440define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
5441; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
5442; CHECK:       # %bb.0: # %entry
5443; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
5444; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
5445; CHECK-NEXT:    ret
5446entry:
5447  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
5448    <vscale x 4 x half> %0,
5449    <vscale x 4 x half>* %1,
5450    <vscale x 4 x i8> %2,
5451    <vscale x 4 x i1> %3,
5452    i32 %4)
5453
5454  ret <vscale x 4 x half> %a
5455}
5456
5457declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
5458  <vscale x 8 x half>*,
5459  <vscale x 8 x i8>,
5460  i32);
5461
5462define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
5463; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8:
5464; CHECK:       # %bb.0: # %entry
5465; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
5466; CHECK-NEXT:    vluxei8.v v26, (a0), v8
5467; CHECK-NEXT:    vmv2r.v v8, v26
5468; CHECK-NEXT:    ret
5469entry:
5470  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
5471    <vscale x 8 x half>* %0,
5472    <vscale x 8 x i8> %1,
5473    i32 %2)
5474
5475  ret <vscale x 8 x half> %a
5476}
5477
5478declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
5479  <vscale x 8 x half>,
5480  <vscale x 8 x half>*,
5481  <vscale x 8 x i8>,
5482  <vscale x 8 x i1>,
5483  i32);
5484
5485define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
5486; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
5487; CHECK:       # %bb.0: # %entry
5488; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
5489; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
5490; CHECK-NEXT:    ret
5491entry:
5492  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
5493    <vscale x 8 x half> %0,
5494    <vscale x 8 x half>* %1,
5495    <vscale x 8 x i8> %2,
5496    <vscale x 8 x i1> %3,
5497    i32 %4)
5498
5499  ret <vscale x 8 x half> %a
5500}
5501
5502declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
5503  <vscale x 16 x half>*,
5504  <vscale x 16 x i8>,
5505  i32);
5506
5507define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
5508; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8:
5509; CHECK:       # %bb.0: # %entry
5510; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
5511; CHECK-NEXT:    vluxei8.v v28, (a0), v8
5512; CHECK-NEXT:    vmv4r.v v8, v28
5513; CHECK-NEXT:    ret
5514entry:
5515  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
5516    <vscale x 16 x half>* %0,
5517    <vscale x 16 x i8> %1,
5518    i32 %2)
5519
5520  ret <vscale x 16 x half> %a
5521}
5522
5523declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
5524  <vscale x 16 x half>,
5525  <vscale x 16 x half>*,
5526  <vscale x 16 x i8>,
5527  <vscale x 16 x i1>,
5528  i32);
5529
5530define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
5531; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
5532; CHECK:       # %bb.0: # %entry
5533; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
5534; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
5535; CHECK-NEXT:    ret
5536entry:
5537  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
5538    <vscale x 16 x half> %0,
5539    <vscale x 16 x half>* %1,
5540    <vscale x 16 x i8> %2,
5541    <vscale x 16 x i1> %3,
5542    i32 %4)
5543
5544  ret <vscale x 16 x half> %a
5545}
5546
5547declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
5548  <vscale x 32 x half>*,
5549  <vscale x 32 x i8>,
5550  i32);
5551
5552define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
5553; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8:
5554; CHECK:       # %bb.0: # %entry
5555; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
5556; CHECK-NEXT:    vluxei8.v v16, (a0), v8
5557; CHECK-NEXT:    vmv8r.v v8, v16
5558; CHECK-NEXT:    ret
5559entry:
5560  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
5561    <vscale x 32 x half>* %0,
5562    <vscale x 32 x i8> %1,
5563    i32 %2)
5564
5565  ret <vscale x 32 x half> %a
5566}
5567
5568declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
5569  <vscale x 32 x half>,
5570  <vscale x 32 x half>*,
5571  <vscale x 32 x i8>,
5572  <vscale x 32 x i1>,
5573  i32);
5574
5575define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
5576; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
5577; CHECK:       # %bb.0: # %entry
5578; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
5579; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
5580; CHECK-NEXT:    ret
5581entry:
5582  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
5583    <vscale x 32 x half> %0,
5584    <vscale x 32 x half>* %1,
5585    <vscale x 32 x i8> %2,
5586    <vscale x 32 x i1> %3,
5587    i32 %4)
5588
5589  ret <vscale x 32 x half> %a
5590}
5591
5592declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
5593  <vscale x 1 x float>*,
5594  <vscale x 1 x i8>,
5595  i32);
5596
5597define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
5598; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8:
5599; CHECK:       # %bb.0: # %entry
5600; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
5601; CHECK-NEXT:    vluxei8.v v25, (a0), v8
5602; CHECK-NEXT:    vmv1r.v v8, v25
5603; CHECK-NEXT:    ret
5604entry:
5605  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
5606    <vscale x 1 x float>* %0,
5607    <vscale x 1 x i8> %1,
5608    i32 %2)
5609
5610  ret <vscale x 1 x float> %a
5611}
5612
5613declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
5614  <vscale x 1 x float>,
5615  <vscale x 1 x float>*,
5616  <vscale x 1 x i8>,
5617  <vscale x 1 x i1>,
5618  i32);
5619
5620define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
5621; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
5622; CHECK:       # %bb.0: # %entry
5623; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
5624; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
5625; CHECK-NEXT:    ret
5626entry:
5627  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
5628    <vscale x 1 x float> %0,
5629    <vscale x 1 x float>* %1,
5630    <vscale x 1 x i8> %2,
5631    <vscale x 1 x i1> %3,
5632    i32 %4)
5633
5634  ret <vscale x 1 x float> %a
5635}
5636
5637declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
5638  <vscale x 2 x float>*,
5639  <vscale x 2 x i8>,
5640  i32);
5641
5642define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
5643; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8:
5644; CHECK:       # %bb.0: # %entry
5645; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
5646; CHECK-NEXT:    vluxei8.v v25, (a0), v8
5647; CHECK-NEXT:    vmv1r.v v8, v25
5648; CHECK-NEXT:    ret
5649entry:
5650  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
5651    <vscale x 2 x float>* %0,
5652    <vscale x 2 x i8> %1,
5653    i32 %2)
5654
5655  ret <vscale x 2 x float> %a
5656}
5657
5658declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
5659  <vscale x 2 x float>,
5660  <vscale x 2 x float>*,
5661  <vscale x 2 x i8>,
5662  <vscale x 2 x i1>,
5663  i32);
5664
5665define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
5666; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
5667; CHECK:       # %bb.0: # %entry
5668; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
5669; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
5670; CHECK-NEXT:    ret
5671entry:
5672  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
5673    <vscale x 2 x float> %0,
5674    <vscale x 2 x float>* %1,
5675    <vscale x 2 x i8> %2,
5676    <vscale x 2 x i1> %3,
5677    i32 %4)
5678
5679  ret <vscale x 2 x float> %a
5680}
5681
5682declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
5683  <vscale x 4 x float>*,
5684  <vscale x 4 x i8>,
5685  i32);
5686
5687define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
5688; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8:
5689; CHECK:       # %bb.0: # %entry
5690; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
5691; CHECK-NEXT:    vluxei8.v v26, (a0), v8
5692; CHECK-NEXT:    vmv2r.v v8, v26
5693; CHECK-NEXT:    ret
5694entry:
5695  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
5696    <vscale x 4 x float>* %0,
5697    <vscale x 4 x i8> %1,
5698    i32 %2)
5699
5700  ret <vscale x 4 x float> %a
5701}
5702
5703declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
5704  <vscale x 4 x float>,
5705  <vscale x 4 x float>*,
5706  <vscale x 4 x i8>,
5707  <vscale x 4 x i1>,
5708  i32);
5709
5710define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
5711; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
5712; CHECK:       # %bb.0: # %entry
5713; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
5714; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
5715; CHECK-NEXT:    ret
5716entry:
5717  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
5718    <vscale x 4 x float> %0,
5719    <vscale x 4 x float>* %1,
5720    <vscale x 4 x i8> %2,
5721    <vscale x 4 x i1> %3,
5722    i32 %4)
5723
5724  ret <vscale x 4 x float> %a
5725}
5726
5727declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
5728  <vscale x 8 x float>*,
5729  <vscale x 8 x i8>,
5730  i32);
5731
5732define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
5733; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8:
5734; CHECK:       # %bb.0: # %entry
5735; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
5736; CHECK-NEXT:    vluxei8.v v28, (a0), v8
5737; CHECK-NEXT:    vmv4r.v v8, v28
5738; CHECK-NEXT:    ret
5739entry:
5740  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
5741    <vscale x 8 x float>* %0,
5742    <vscale x 8 x i8> %1,
5743    i32 %2)
5744
5745  ret <vscale x 8 x float> %a
5746}
5747
5748declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
5749  <vscale x 8 x float>,
5750  <vscale x 8 x float>*,
5751  <vscale x 8 x i8>,
5752  <vscale x 8 x i1>,
5753  i32);
5754
5755define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
5756; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
5757; CHECK:       # %bb.0: # %entry
5758; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
5759; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
5760; CHECK-NEXT:    ret
5761entry:
5762  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
5763    <vscale x 8 x float> %0,
5764    <vscale x 8 x float>* %1,
5765    <vscale x 8 x i8> %2,
5766    <vscale x 8 x i1> %3,
5767    i32 %4)
5768
5769  ret <vscale x 8 x float> %a
5770}
5771
5772declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
5773  <vscale x 16 x float>*,
5774  <vscale x 16 x i8>,
5775  i32);
5776
5777define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
5778; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8:
5779; CHECK:       # %bb.0: # %entry
5780; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
5781; CHECK-NEXT:    vluxei8.v v16, (a0), v8
5782; CHECK-NEXT:    vmv8r.v v8, v16
5783; CHECK-NEXT:    ret
5784entry:
5785  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
5786    <vscale x 16 x float>* %0,
5787    <vscale x 16 x i8> %1,
5788    i32 %2)
5789
5790  ret <vscale x 16 x float> %a
5791}
5792
5793declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
5794  <vscale x 16 x float>,
5795  <vscale x 16 x float>*,
5796  <vscale x 16 x i8>,
5797  <vscale x 16 x i1>,
5798  i32);
5799
5800define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
5801; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
5802; CHECK:       # %bb.0: # %entry
5803; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
5804; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
5805; CHECK-NEXT:    ret
5806entry:
5807  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
5808    <vscale x 16 x float> %0,
5809    <vscale x 16 x float>* %1,
5810    <vscale x 16 x i8> %2,
5811    <vscale x 16 x i1> %3,
5812    i32 %4)
5813
5814  ret <vscale x 16 x float> %a
5815}
5816
5817declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
5818  <vscale x 1 x double>*,
5819  <vscale x 1 x i8>,
5820  i32);
5821
5822define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
5823; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8:
5824; CHECK:       # %bb.0: # %entry
5825; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
5826; CHECK-NEXT:    vluxei8.v v25, (a0), v8
5827; CHECK-NEXT:    vmv1r.v v8, v25
5828; CHECK-NEXT:    ret
5829entry:
5830  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
5831    <vscale x 1 x double>* %0,
5832    <vscale x 1 x i8> %1,
5833    i32 %2)
5834
5835  ret <vscale x 1 x double> %a
5836}
5837
5838declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
5839  <vscale x 1 x double>,
5840  <vscale x 1 x double>*,
5841  <vscale x 1 x i8>,
5842  <vscale x 1 x i1>,
5843  i32);
5844
5845define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
5846; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
5847; CHECK:       # %bb.0: # %entry
5848; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
5849; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
5850; CHECK-NEXT:    ret
5851entry:
5852  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
5853    <vscale x 1 x double> %0,
5854    <vscale x 1 x double>* %1,
5855    <vscale x 1 x i8> %2,
5856    <vscale x 1 x i1> %3,
5857    i32 %4)
5858
5859  ret <vscale x 1 x double> %a
5860}
5861
5862declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
5863  <vscale x 2 x double>*,
5864  <vscale x 2 x i8>,
5865  i32);
5866
5867define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
5868; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8:
5869; CHECK:       # %bb.0: # %entry
5870; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
5871; CHECK-NEXT:    vluxei8.v v26, (a0), v8
5872; CHECK-NEXT:    vmv2r.v v8, v26
5873; CHECK-NEXT:    ret
5874entry:
5875  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
5876    <vscale x 2 x double>* %0,
5877    <vscale x 2 x i8> %1,
5878    i32 %2)
5879
5880  ret <vscale x 2 x double> %a
5881}
5882
5883declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
5884  <vscale x 2 x double>,
5885  <vscale x 2 x double>*,
5886  <vscale x 2 x i8>,
5887  <vscale x 2 x i1>,
5888  i32);
5889
5890define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
5891; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
5892; CHECK:       # %bb.0: # %entry
5893; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
5894; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
5895; CHECK-NEXT:    ret
5896entry:
5897  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
5898    <vscale x 2 x double> %0,
5899    <vscale x 2 x double>* %1,
5900    <vscale x 2 x i8> %2,
5901    <vscale x 2 x i1> %3,
5902    i32 %4)
5903
5904  ret <vscale x 2 x double> %a
5905}
5906
5907declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
5908  <vscale x 4 x double>*,
5909  <vscale x 4 x i8>,
5910  i32);
5911
5912define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
5913; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8:
5914; CHECK:       # %bb.0: # %entry
5915; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
5916; CHECK-NEXT:    vluxei8.v v28, (a0), v8
5917; CHECK-NEXT:    vmv4r.v v8, v28
5918; CHECK-NEXT:    ret
5919entry:
5920  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
5921    <vscale x 4 x double>* %0,
5922    <vscale x 4 x i8> %1,
5923    i32 %2)
5924
5925  ret <vscale x 4 x double> %a
5926}
5927
5928declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
5929  <vscale x 4 x double>,
5930  <vscale x 4 x double>*,
5931  <vscale x 4 x i8>,
5932  <vscale x 4 x i1>,
5933  i32);
5934
5935define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
5936; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
5937; CHECK:       # %bb.0: # %entry
5938; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
5939; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
5940; CHECK-NEXT:    ret
5941entry:
5942  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
5943    <vscale x 4 x double> %0,
5944    <vscale x 4 x double>* %1,
5945    <vscale x 4 x i8> %2,
5946    <vscale x 4 x i1> %3,
5947    i32 %4)
5948
5949  ret <vscale x 4 x double> %a
5950}
5951
5952declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
5953  <vscale x 8 x double>*,
5954  <vscale x 8 x i8>,
5955  i32);
5956
5957define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
5958; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8:
5959; CHECK:       # %bb.0: # %entry
5960; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
5961; CHECK-NEXT:    vluxei8.v v16, (a0), v8
5962; CHECK-NEXT:    vmv8r.v v8, v16
5963; CHECK-NEXT:    ret
5964entry:
5965  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
5966    <vscale x 8 x double>* %0,
5967    <vscale x 8 x i8> %1,
5968    i32 %2)
5969
5970  ret <vscale x 8 x double> %a
5971}
5972
5973declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
5974  <vscale x 8 x double>,
5975  <vscale x 8 x double>*,
5976  <vscale x 8 x i8>,
5977  <vscale x 8 x i1>,
5978  i32);
5979
5980define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
5981; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
5982; CHECK:       # %bb.0: # %entry
5983; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
5984; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
5985; CHECK-NEXT:    ret
5986entry:
5987  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
5988    <vscale x 8 x double> %0,
5989    <vscale x 8 x double>* %1,
5990    <vscale x 8 x i8> %2,
5991    <vscale x 8 x i1> %3,
5992    i32 %4)
5993
5994  ret <vscale x 8 x double> %a
5995}
5996