1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
3; RUN:     -verify-machineinstrs < %s | FileCheck %s
4
5declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16*, i64, i64)
6declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, i64, <vscale x 16 x i1>, i64)
7
8define <vscale x 16 x i16> @test_vlsseg2_nxv16i16(i16* %base, i64 %offset, i64 %vl) {
9; CHECK-LABEL: test_vlsseg2_nxv16i16:
10; CHECK:       # %bb.0: # %entry
11; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
12; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
13; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
14; CHECK-NEXT:    ret
15entry:
16  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 %vl)
17  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
18  ret <vscale x 16 x i16> %1
19}
20
21define <vscale x 16 x i16> @test_vlsseg2_mask_nxv16i16(i16* %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
22; CHECK-LABEL: test_vlsseg2_mask_nxv16i16:
23; CHECK:       # %bb.0: # %entry
24; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
25; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
26; CHECK-NEXT:    vmv4r.v v8, v4
27; CHECK-NEXT:    vsetvli zero, zero, e16, m4, tu, mu
28; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1, v0.t
29; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
30; CHECK-NEXT:    ret
31entry:
32  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 %vl)
33  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
34  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
35  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
36  ret <vscale x 16 x i16> %3
37}
38
39declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32*, i64, i64)
40declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i64, <vscale x 4 x i1>, i64)
41
42define <vscale x 4 x i32> @test_vlsseg2_nxv4i32(i32* %base, i64 %offset, i64 %vl) {
43; CHECK-LABEL: test_vlsseg2_nxv4i32:
44; CHECK:       # %bb.0: # %entry
45; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
46; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1
47; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
48; CHECK-NEXT:    ret
49entry:
50  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i64 %offset, i64 %vl)
51  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
52  ret <vscale x 4 x i32> %1
53}
54
55define <vscale x 4 x i32> @test_vlsseg2_mask_nxv4i32(i32* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
56; CHECK-LABEL: test_vlsseg2_mask_nxv4i32:
57; CHECK:       # %bb.0: # %entry
58; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
59; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1
60; CHECK-NEXT:    vmv2r.v v8, v6
61; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
62; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1, v0.t
63; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
64; CHECK-NEXT:    ret
65entry:
66  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i64 %offset, i64 %vl)
67  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
68  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
69  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
70  ret <vscale x 4 x i32> %3
71}
72
73declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32*, i64, i64)
74declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i64, <vscale x 4 x i1>, i64)
75
76define <vscale x 4 x i32> @test_vlsseg3_nxv4i32(i32* %base, i64 %offset, i64 %vl) {
77; CHECK-LABEL: test_vlsseg3_nxv4i32:
78; CHECK:       # %bb.0: # %entry
79; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
80; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1
81; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
82; CHECK-NEXT:    ret
83entry:
84  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i64 %offset, i64 %vl)
85  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
86  ret <vscale x 4 x i32> %1
87}
88
89define <vscale x 4 x i32> @test_vlsseg3_mask_nxv4i32(i32* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
90; CHECK-LABEL: test_vlsseg3_mask_nxv4i32:
91; CHECK:       # %bb.0: # %entry
92; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
93; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1
94; CHECK-NEXT:    vmv2r.v v8, v6
95; CHECK-NEXT:    vmv2r.v v10, v6
96; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
97; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1, v0.t
98; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
99; CHECK-NEXT:    ret
100entry:
101  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i64 %offset, i64 %vl)
102  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
103  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
104  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
105  ret <vscale x 4 x i32> %3
106}
107
108declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32*, i64, i64)
109declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i64, <vscale x 4 x i1>, i64)
110
111define <vscale x 4 x i32> @test_vlsseg4_nxv4i32(i32* %base, i64 %offset, i64 %vl) {
112; CHECK-LABEL: test_vlsseg4_nxv4i32:
113; CHECK:       # %bb.0: # %entry
114; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
115; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1
116; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
117; CHECK-NEXT:    ret
118entry:
119  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i64 %offset, i64 %vl)
120  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
121  ret <vscale x 4 x i32> %1
122}
123
124define <vscale x 4 x i32> @test_vlsseg4_mask_nxv4i32(i32* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
125; CHECK-LABEL: test_vlsseg4_mask_nxv4i32:
126; CHECK:       # %bb.0: # %entry
127; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
128; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1
129; CHECK-NEXT:    vmv2r.v v8, v6
130; CHECK-NEXT:    vmv2r.v v10, v6
131; CHECK-NEXT:    vmv2r.v v12, v6
132; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
133; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1, v0.t
134; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
135; CHECK-NEXT:    ret
136entry:
137  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i64 %offset, i64 %vl)
138  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
139  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
140  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
141  ret <vscale x 4 x i32> %3
142}
143
144declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8*, i64, i64)
145declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i64, <vscale x 16 x i1>, i64)
146
147define <vscale x 16 x i8> @test_vlsseg2_nxv16i8(i8* %base, i64 %offset, i64 %vl) {
148; CHECK-LABEL: test_vlsseg2_nxv16i8:
149; CHECK:       # %bb.0: # %entry
150; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
151; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1
152; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
153; CHECK-NEXT:    ret
154entry:
155  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i64 %offset, i64 %vl)
156  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
157  ret <vscale x 16 x i8> %1
158}
159
160define <vscale x 16 x i8> @test_vlsseg2_mask_nxv16i8(i8* %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
161; CHECK-LABEL: test_vlsseg2_mask_nxv16i8:
162; CHECK:       # %bb.0: # %entry
163; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
164; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1
165; CHECK-NEXT:    vmv2r.v v8, v6
166; CHECK-NEXT:    vsetvli zero, zero, e8, m2, tu, mu
167; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1, v0.t
168; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
169; CHECK-NEXT:    ret
170entry:
171  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i64 %offset, i64 %vl)
172  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
173  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
174  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
175  ret <vscale x 16 x i8> %3
176}
177
178declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8*, i64, i64)
179declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i64, <vscale x 16 x i1>, i64)
180
181define <vscale x 16 x i8> @test_vlsseg3_nxv16i8(i8* %base, i64 %offset, i64 %vl) {
182; CHECK-LABEL: test_vlsseg3_nxv16i8:
183; CHECK:       # %bb.0: # %entry
184; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
185; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1
186; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
187; CHECK-NEXT:    ret
188entry:
189  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i64 %offset, i64 %vl)
190  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
191  ret <vscale x 16 x i8> %1
192}
193
194define <vscale x 16 x i8> @test_vlsseg3_mask_nxv16i8(i8* %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
195; CHECK-LABEL: test_vlsseg3_mask_nxv16i8:
196; CHECK:       # %bb.0: # %entry
197; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
198; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1
199; CHECK-NEXT:    vmv2r.v v8, v6
200; CHECK-NEXT:    vmv2r.v v10, v6
201; CHECK-NEXT:    vsetvli zero, zero, e8, m2, tu, mu
202; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1, v0.t
203; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
204; CHECK-NEXT:    ret
205entry:
206  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i64 %offset, i64 %vl)
207  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
208  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
209  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
210  ret <vscale x 16 x i8> %3
211}
212
213declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8*, i64, i64)
214declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i64, <vscale x 16 x i1>, i64)
215
216define <vscale x 16 x i8> @test_vlsseg4_nxv16i8(i8* %base, i64 %offset, i64 %vl) {
217; CHECK-LABEL: test_vlsseg4_nxv16i8:
218; CHECK:       # %bb.0: # %entry
219; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
220; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1
221; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
222; CHECK-NEXT:    ret
223entry:
224  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i64 %offset, i64 %vl)
225  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
226  ret <vscale x 16 x i8> %1
227}
228
229define <vscale x 16 x i8> @test_vlsseg4_mask_nxv16i8(i8* %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
230; CHECK-LABEL: test_vlsseg4_mask_nxv16i8:
231; CHECK:       # %bb.0: # %entry
232; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
233; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1
234; CHECK-NEXT:    vmv2r.v v8, v6
235; CHECK-NEXT:    vmv2r.v v10, v6
236; CHECK-NEXT:    vmv2r.v v12, v6
237; CHECK-NEXT:    vsetvli zero, zero, e8, m2, tu, mu
238; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1, v0.t
239; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
240; CHECK-NEXT:    ret
241entry:
242  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i64 %offset, i64 %vl)
243  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
244  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
245  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
246  ret <vscale x 16 x i8> %3
247}
248
249declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(i64*, i64, i64)
250declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
251
252define <vscale x 1 x i64> @test_vlsseg2_nxv1i64(i64* %base, i64 %offset, i64 %vl) {
253; CHECK-LABEL: test_vlsseg2_nxv1i64:
254; CHECK:       # %bb.0: # %entry
255; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
256; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1
257; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
258; CHECK-NEXT:    ret
259entry:
260  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(i64* %base, i64 %offset, i64 %vl)
261  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
262  ret <vscale x 1 x i64> %1
263}
264
265define <vscale x 1 x i64> @test_vlsseg2_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
266; CHECK-LABEL: test_vlsseg2_mask_nxv1i64:
267; CHECK:       # %bb.0: # %entry
268; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
269; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1
270; CHECK-NEXT:    vmv1r.v v8, v7
271; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
272; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1, v0.t
273; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
274; CHECK-NEXT:    ret
275entry:
276  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(i64* %base, i64 %offset, i64 %vl)
277  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
278  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
279  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
280  ret <vscale x 1 x i64> %3
281}
282
283declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(i64*, i64, i64)
284declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
285
286define <vscale x 1 x i64> @test_vlsseg3_nxv1i64(i64* %base, i64 %offset, i64 %vl) {
287; CHECK-LABEL: test_vlsseg3_nxv1i64:
288; CHECK:       # %bb.0: # %entry
289; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
290; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1
291; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
292; CHECK-NEXT:    ret
293entry:
294  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(i64* %base, i64 %offset, i64 %vl)
295  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
296  ret <vscale x 1 x i64> %1
297}
298
299define <vscale x 1 x i64> @test_vlsseg3_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
300; CHECK-LABEL: test_vlsseg3_mask_nxv1i64:
301; CHECK:       # %bb.0: # %entry
302; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
303; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1
304; CHECK-NEXT:    vmv1r.v v8, v7
305; CHECK-NEXT:    vmv1r.v v9, v7
306; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
307; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1, v0.t
308; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
309; CHECK-NEXT:    ret
310entry:
311  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(i64* %base, i64 %offset, i64 %vl)
312  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
313  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
314  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
315  ret <vscale x 1 x i64> %3
316}
317
318declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(i64*, i64, i64)
319declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
320
321define <vscale x 1 x i64> @test_vlsseg4_nxv1i64(i64* %base, i64 %offset, i64 %vl) {
322; CHECK-LABEL: test_vlsseg4_nxv1i64:
323; CHECK:       # %bb.0: # %entry
324; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
325; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1
326; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
327; CHECK-NEXT:    ret
328entry:
329  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(i64* %base, i64 %offset, i64 %vl)
330  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
331  ret <vscale x 1 x i64> %1
332}
333
334define <vscale x 1 x i64> @test_vlsseg4_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
335; CHECK-LABEL: test_vlsseg4_mask_nxv1i64:
336; CHECK:       # %bb.0: # %entry
337; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
338; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1
339; CHECK-NEXT:    vmv1r.v v8, v7
340; CHECK-NEXT:    vmv1r.v v9, v7
341; CHECK-NEXT:    vmv1r.v v10, v7
342; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
343; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1, v0.t
344; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
345; CHECK-NEXT:    ret
346entry:
347  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(i64* %base, i64 %offset, i64 %vl)
348  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
349  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
350  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
351  ret <vscale x 1 x i64> %3
352}
353
354declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(i64*, i64, i64)
355declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
356
357define <vscale x 1 x i64> @test_vlsseg5_nxv1i64(i64* %base, i64 %offset, i64 %vl) {
358; CHECK-LABEL: test_vlsseg5_nxv1i64:
359; CHECK:       # %bb.0: # %entry
360; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
361; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1
362; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
363; CHECK-NEXT:    ret
364entry:
365  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(i64* %base, i64 %offset, i64 %vl)
366  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
367  ret <vscale x 1 x i64> %1
368}
369
370define <vscale x 1 x i64> @test_vlsseg5_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
371; CHECK-LABEL: test_vlsseg5_mask_nxv1i64:
372; CHECK:       # %bb.0: # %entry
373; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
374; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1
375; CHECK-NEXT:    vmv1r.v v8, v7
376; CHECK-NEXT:    vmv1r.v v9, v7
377; CHECK-NEXT:    vmv1r.v v10, v7
378; CHECK-NEXT:    vmv1r.v v11, v7
379; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
380; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1, v0.t
381; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
382; CHECK-NEXT:    ret
383entry:
384  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(i64* %base, i64 %offset, i64 %vl)
385  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
386  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
387  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
388  ret <vscale x 1 x i64> %3
389}
390
391declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(i64*, i64, i64)
392declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
393
394define <vscale x 1 x i64> @test_vlsseg6_nxv1i64(i64* %base, i64 %offset, i64 %vl) {
395; CHECK-LABEL: test_vlsseg6_nxv1i64:
396; CHECK:       # %bb.0: # %entry
397; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
398; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1
399; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
400; CHECK-NEXT:    ret
401entry:
402  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(i64* %base, i64 %offset, i64 %vl)
403  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
404  ret <vscale x 1 x i64> %1
405}
406
407define <vscale x 1 x i64> @test_vlsseg6_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
408; CHECK-LABEL: test_vlsseg6_mask_nxv1i64:
409; CHECK:       # %bb.0: # %entry
410; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
411; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1
412; CHECK-NEXT:    vmv1r.v v8, v7
413; CHECK-NEXT:    vmv1r.v v9, v7
414; CHECK-NEXT:    vmv1r.v v10, v7
415; CHECK-NEXT:    vmv1r.v v11, v7
416; CHECK-NEXT:    vmv1r.v v12, v7
417; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
418; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1, v0.t
419; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
420; CHECK-NEXT:    ret
421entry:
422  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(i64* %base, i64 %offset, i64 %vl)
423  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
424  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
425  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
426  ret <vscale x 1 x i64> %3
427}
428
429declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(i64*, i64, i64)
430declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
431
432define <vscale x 1 x i64> @test_vlsseg7_nxv1i64(i64* %base, i64 %offset, i64 %vl) {
433; CHECK-LABEL: test_vlsseg7_nxv1i64:
434; CHECK:       # %bb.0: # %entry
435; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
436; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1
437; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
438; CHECK-NEXT:    ret
439entry:
440  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(i64* %base, i64 %offset, i64 %vl)
441  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
442  ret <vscale x 1 x i64> %1
443}
444
445define <vscale x 1 x i64> @test_vlsseg7_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
446; CHECK-LABEL: test_vlsseg7_mask_nxv1i64:
447; CHECK:       # %bb.0: # %entry
448; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
449; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1
450; CHECK-NEXT:    vmv1r.v v8, v7
451; CHECK-NEXT:    vmv1r.v v9, v7
452; CHECK-NEXT:    vmv1r.v v10, v7
453; CHECK-NEXT:    vmv1r.v v11, v7
454; CHECK-NEXT:    vmv1r.v v12, v7
455; CHECK-NEXT:    vmv1r.v v13, v7
456; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
457; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1, v0.t
458; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
459; CHECK-NEXT:    ret
460entry:
461  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(i64* %base, i64 %offset, i64 %vl)
462  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
463  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
464  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
465  ret <vscale x 1 x i64> %3
466}
467
468declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(i64*, i64, i64)
469declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
470
471define <vscale x 1 x i64> @test_vlsseg8_nxv1i64(i64* %base, i64 %offset, i64 %vl) {
472; CHECK-LABEL: test_vlsseg8_nxv1i64:
473; CHECK:       # %bb.0: # %entry
474; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
475; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1
476; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
477; CHECK-NEXT:    ret
478entry:
479  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(i64* %base, i64 %offset, i64 %vl)
480  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
481  ret <vscale x 1 x i64> %1
482}
483
484define <vscale x 1 x i64> @test_vlsseg8_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
485; CHECK-LABEL: test_vlsseg8_mask_nxv1i64:
486; CHECK:       # %bb.0: # %entry
487; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
488; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1
489; CHECK-NEXT:    vmv1r.v v8, v7
490; CHECK-NEXT:    vmv1r.v v9, v7
491; CHECK-NEXT:    vmv1r.v v10, v7
492; CHECK-NEXT:    vmv1r.v v11, v7
493; CHECK-NEXT:    vmv1r.v v12, v7
494; CHECK-NEXT:    vmv1r.v v13, v7
495; CHECK-NEXT:    vmv1r.v v14, v7
496; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
497; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1, v0.t
498; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
499; CHECK-NEXT:    ret
500entry:
501  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(i64* %base, i64 %offset, i64 %vl)
502  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
503  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
504  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
505  ret <vscale x 1 x i64> %3
506}
507
508declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(i32*, i64, i64)
509declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
510
511define <vscale x 1 x i32> @test_vlsseg2_nxv1i32(i32* %base, i64 %offset, i64 %vl) {
512; CHECK-LABEL: test_vlsseg2_nxv1i32:
513; CHECK:       # %bb.0: # %entry
514; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
515; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
516; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
517; CHECK-NEXT:    ret
518entry:
519  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i64 %offset, i64 %vl)
520  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
521  ret <vscale x 1 x i32> %1
522}
523
524define <vscale x 1 x i32> @test_vlsseg2_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
525; CHECK-LABEL: test_vlsseg2_mask_nxv1i32:
526; CHECK:       # %bb.0: # %entry
527; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
528; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
529; CHECK-NEXT:    vmv1r.v v8, v7
530; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
531; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1, v0.t
532; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
533; CHECK-NEXT:    ret
534entry:
535  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i64 %offset, i64 %vl)
536  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
537  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
538  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
539  ret <vscale x 1 x i32> %3
540}
541
542declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(i32*, i64, i64)
543declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
544
545define <vscale x 1 x i32> @test_vlsseg3_nxv1i32(i32* %base, i64 %offset, i64 %vl) {
546; CHECK-LABEL: test_vlsseg3_nxv1i32:
547; CHECK:       # %bb.0: # %entry
548; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
549; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
550; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
551; CHECK-NEXT:    ret
552entry:
553  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i64 %offset, i64 %vl)
554  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
555  ret <vscale x 1 x i32> %1
556}
557
558define <vscale x 1 x i32> @test_vlsseg3_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
559; CHECK-LABEL: test_vlsseg3_mask_nxv1i32:
560; CHECK:       # %bb.0: # %entry
561; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
562; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
563; CHECK-NEXT:    vmv1r.v v8, v7
564; CHECK-NEXT:    vmv1r.v v9, v7
565; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
566; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1, v0.t
567; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
568; CHECK-NEXT:    ret
569entry:
570  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i64 %offset, i64 %vl)
571  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
572  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
573  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
574  ret <vscale x 1 x i32> %3
575}
576
577declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(i32*, i64, i64)
578declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
579
580define <vscale x 1 x i32> @test_vlsseg4_nxv1i32(i32* %base, i64 %offset, i64 %vl) {
581; CHECK-LABEL: test_vlsseg4_nxv1i32:
582; CHECK:       # %bb.0: # %entry
583; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
584; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
585; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
586; CHECK-NEXT:    ret
587entry:
588  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i64 %offset, i64 %vl)
589  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
590  ret <vscale x 1 x i32> %1
591}
592
593define <vscale x 1 x i32> @test_vlsseg4_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
594; CHECK-LABEL: test_vlsseg4_mask_nxv1i32:
595; CHECK:       # %bb.0: # %entry
596; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
597; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
598; CHECK-NEXT:    vmv1r.v v8, v7
599; CHECK-NEXT:    vmv1r.v v9, v7
600; CHECK-NEXT:    vmv1r.v v10, v7
601; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
602; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1, v0.t
603; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
604; CHECK-NEXT:    ret
605entry:
606  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i64 %offset, i64 %vl)
607  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
608  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
609  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
610  ret <vscale x 1 x i32> %3
611}
612
613declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(i32*, i64, i64)
614declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
615
616define <vscale x 1 x i32> @test_vlsseg5_nxv1i32(i32* %base, i64 %offset, i64 %vl) {
617; CHECK-LABEL: test_vlsseg5_nxv1i32:
618; CHECK:       # %bb.0: # %entry
619; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
620; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
621; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
622; CHECK-NEXT:    ret
623entry:
624  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i64 %offset, i64 %vl)
625  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
626  ret <vscale x 1 x i32> %1
627}
628
629define <vscale x 1 x i32> @test_vlsseg5_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
630; CHECK-LABEL: test_vlsseg5_mask_nxv1i32:
631; CHECK:       # %bb.0: # %entry
632; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
633; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
634; CHECK-NEXT:    vmv1r.v v8, v7
635; CHECK-NEXT:    vmv1r.v v9, v7
636; CHECK-NEXT:    vmv1r.v v10, v7
637; CHECK-NEXT:    vmv1r.v v11, v7
638; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
639; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1, v0.t
640; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
641; CHECK-NEXT:    ret
642entry:
643  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i64 %offset, i64 %vl)
644  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
645  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
646  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
647  ret <vscale x 1 x i32> %3
648}
649
650declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(i32*, i64, i64)
651declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
652
653define <vscale x 1 x i32> @test_vlsseg6_nxv1i32(i32* %base, i64 %offset, i64 %vl) {
654; CHECK-LABEL: test_vlsseg6_nxv1i32:
655; CHECK:       # %bb.0: # %entry
656; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
657; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
658; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
659; CHECK-NEXT:    ret
660entry:
661  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i64 %offset, i64 %vl)
662  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
663  ret <vscale x 1 x i32> %1
664}
665
666define <vscale x 1 x i32> @test_vlsseg6_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
667; CHECK-LABEL: test_vlsseg6_mask_nxv1i32:
668; CHECK:       # %bb.0: # %entry
669; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
670; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
671; CHECK-NEXT:    vmv1r.v v8, v7
672; CHECK-NEXT:    vmv1r.v v9, v7
673; CHECK-NEXT:    vmv1r.v v10, v7
674; CHECK-NEXT:    vmv1r.v v11, v7
675; CHECK-NEXT:    vmv1r.v v12, v7
676; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
677; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1, v0.t
678; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
679; CHECK-NEXT:    ret
680entry:
681  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i64 %offset, i64 %vl)
682  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
683  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
684  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
685  ret <vscale x 1 x i32> %3
686}
687
688declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(i32*, i64, i64)
689declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
690
691define <vscale x 1 x i32> @test_vlsseg7_nxv1i32(i32* %base, i64 %offset, i64 %vl) {
692; CHECK-LABEL: test_vlsseg7_nxv1i32:
693; CHECK:       # %bb.0: # %entry
694; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
695; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
696; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
697; CHECK-NEXT:    ret
698entry:
699  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i64 %offset, i64 %vl)
700  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
701  ret <vscale x 1 x i32> %1
702}
703
704define <vscale x 1 x i32> @test_vlsseg7_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
705; CHECK-LABEL: test_vlsseg7_mask_nxv1i32:
706; CHECK:       # %bb.0: # %entry
707; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
708; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
709; CHECK-NEXT:    vmv1r.v v8, v7
710; CHECK-NEXT:    vmv1r.v v9, v7
711; CHECK-NEXT:    vmv1r.v v10, v7
712; CHECK-NEXT:    vmv1r.v v11, v7
713; CHECK-NEXT:    vmv1r.v v12, v7
714; CHECK-NEXT:    vmv1r.v v13, v7
715; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
716; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1, v0.t
717; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
718; CHECK-NEXT:    ret
719entry:
720  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i64 %offset, i64 %vl)
721  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
722  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
723  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
724  ret <vscale x 1 x i32> %3
725}
726
727declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(i32*, i64, i64)
728declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
729
730define <vscale x 1 x i32> @test_vlsseg8_nxv1i32(i32* %base, i64 %offset, i64 %vl) {
731; CHECK-LABEL: test_vlsseg8_nxv1i32:
732; CHECK:       # %bb.0: # %entry
733; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
734; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
735; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
736; CHECK-NEXT:    ret
737entry:
738  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i64 %offset, i64 %vl)
739  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
740  ret <vscale x 1 x i32> %1
741}
742
743define <vscale x 1 x i32> @test_vlsseg8_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
744; CHECK-LABEL: test_vlsseg8_mask_nxv1i32:
745; CHECK:       # %bb.0: # %entry
746; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
747; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
748; CHECK-NEXT:    vmv1r.v v8, v7
749; CHECK-NEXT:    vmv1r.v v9, v7
750; CHECK-NEXT:    vmv1r.v v10, v7
751; CHECK-NEXT:    vmv1r.v v11, v7
752; CHECK-NEXT:    vmv1r.v v12, v7
753; CHECK-NEXT:    vmv1r.v v13, v7
754; CHECK-NEXT:    vmv1r.v v14, v7
755; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
756; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1, v0.t
757; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
758; CHECK-NEXT:    ret
759entry:
760  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i64 %offset, i64 %vl)
761  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
762  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
763  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
764  ret <vscale x 1 x i32> %3
765}
766
767declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(i16*, i64, i64)
768declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i64, <vscale x 8 x i1>, i64)
769
770define <vscale x 8 x i16> @test_vlsseg2_nxv8i16(i16* %base, i64 %offset, i64 %vl) {
771; CHECK-LABEL: test_vlsseg2_nxv8i16:
772; CHECK:       # %bb.0: # %entry
773; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
774; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1
775; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
776; CHECK-NEXT:    ret
777entry:
778  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i64 %offset, i64 %vl)
779  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
780  ret <vscale x 8 x i16> %1
781}
782
783define <vscale x 8 x i16> @test_vlsseg2_mask_nxv8i16(i16* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
784; CHECK-LABEL: test_vlsseg2_mask_nxv8i16:
785; CHECK:       # %bb.0: # %entry
786; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
787; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1
788; CHECK-NEXT:    vmv2r.v v8, v6
789; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
790; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1, v0.t
791; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
792; CHECK-NEXT:    ret
793entry:
794  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i64 %offset, i64 %vl)
795  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
796  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
797  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
798  ret <vscale x 8 x i16> %3
799}
800
801declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(i16*, i64, i64)
802declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i64, <vscale x 8 x i1>, i64)
803
804define <vscale x 8 x i16> @test_vlsseg3_nxv8i16(i16* %base, i64 %offset, i64 %vl) {
805; CHECK-LABEL: test_vlsseg3_nxv8i16:
806; CHECK:       # %bb.0: # %entry
807; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
808; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1
809; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
810; CHECK-NEXT:    ret
811entry:
812  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i64 %offset, i64 %vl)
813  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
814  ret <vscale x 8 x i16> %1
815}
816
817define <vscale x 8 x i16> @test_vlsseg3_mask_nxv8i16(i16* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
818; CHECK-LABEL: test_vlsseg3_mask_nxv8i16:
819; CHECK:       # %bb.0: # %entry
820; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
821; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1
822; CHECK-NEXT:    vmv2r.v v8, v6
823; CHECK-NEXT:    vmv2r.v v10, v6
824; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
825; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1, v0.t
826; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
827; CHECK-NEXT:    ret
828entry:
829  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i64 %offset, i64 %vl)
830  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
831  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
832  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
833  ret <vscale x 8 x i16> %3
834}
835
836declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(i16*, i64, i64)
837declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i64, <vscale x 8 x i1>, i64)
838
839define <vscale x 8 x i16> @test_vlsseg4_nxv8i16(i16* %base, i64 %offset, i64 %vl) {
840; CHECK-LABEL: test_vlsseg4_nxv8i16:
841; CHECK:       # %bb.0: # %entry
842; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
843; CHECK-NEXT:    vlsseg4e16.v v6, (a0), a1
844; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
845; CHECK-NEXT:    ret
846entry:
847  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i64 %offset, i64 %vl)
848  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
849  ret <vscale x 8 x i16> %1
850}
851
852define <vscale x 8 x i16> @test_vlsseg4_mask_nxv8i16(i16* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
853; CHECK-LABEL: test_vlsseg4_mask_nxv8i16:
854; CHECK:       # %bb.0: # %entry
855; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
856; CHECK-NEXT:    vlsseg4e16.v v6, (a0), a1
857; CHECK-NEXT:    vmv2r.v v8, v6
858; CHECK-NEXT:    vmv2r.v v10, v6
859; CHECK-NEXT:    vmv2r.v v12, v6
860; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
861; CHECK-NEXT:    vlsseg4e16.v v6, (a0), a1, v0.t
862; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
863; CHECK-NEXT:    ret
864entry:
865  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i64 %offset, i64 %vl)
866  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
867  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
868  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
869  ret <vscale x 8 x i16> %3
870}
871
872declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8*, i64, i64)
873declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
874
875define <vscale x 4 x i8> @test_vlsseg2_nxv4i8(i8* %base, i64 %offset, i64 %vl) {
876; CHECK-LABEL: test_vlsseg2_nxv4i8:
877; CHECK:       # %bb.0: # %entry
878; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
879; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
880; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
881; CHECK-NEXT:    ret
882entry:
883  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i64 %offset, i64 %vl)
884  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
885  ret <vscale x 4 x i8> %1
886}
887
888define <vscale x 4 x i8> @test_vlsseg2_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
889; CHECK-LABEL: test_vlsseg2_mask_nxv4i8:
890; CHECK:       # %bb.0: # %entry
891; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
892; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
893; CHECK-NEXT:    vmv1r.v v8, v7
894; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
895; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1, v0.t
896; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
897; CHECK-NEXT:    ret
898entry:
899  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i64 %offset, i64 %vl)
900  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
901  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
902  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
903  ret <vscale x 4 x i8> %3
904}
905
906declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8*, i64, i64)
907declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
908
909define <vscale x 4 x i8> @test_vlsseg3_nxv4i8(i8* %base, i64 %offset, i64 %vl) {
910; CHECK-LABEL: test_vlsseg3_nxv4i8:
911; CHECK:       # %bb.0: # %entry
912; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
913; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
914; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
915; CHECK-NEXT:    ret
916entry:
917  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i64 %offset, i64 %vl)
918  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
919  ret <vscale x 4 x i8> %1
920}
921
922define <vscale x 4 x i8> @test_vlsseg3_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
923; CHECK-LABEL: test_vlsseg3_mask_nxv4i8:
924; CHECK:       # %bb.0: # %entry
925; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
926; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
927; CHECK-NEXT:    vmv1r.v v8, v7
928; CHECK-NEXT:    vmv1r.v v9, v7
929; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
930; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1, v0.t
931; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
932; CHECK-NEXT:    ret
933entry:
934  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i64 %offset, i64 %vl)
935  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
936  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
937  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
938  ret <vscale x 4 x i8> %3
939}
940
941declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8*, i64, i64)
942declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
943
944define <vscale x 4 x i8> @test_vlsseg4_nxv4i8(i8* %base, i64 %offset, i64 %vl) {
945; CHECK-LABEL: test_vlsseg4_nxv4i8:
946; CHECK:       # %bb.0: # %entry
947; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
948; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
949; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
950; CHECK-NEXT:    ret
951entry:
952  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i64 %offset, i64 %vl)
953  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
954  ret <vscale x 4 x i8> %1
955}
956
957define <vscale x 4 x i8> @test_vlsseg4_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
958; CHECK-LABEL: test_vlsseg4_mask_nxv4i8:
959; CHECK:       # %bb.0: # %entry
960; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
961; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
962; CHECK-NEXT:    vmv1r.v v8, v7
963; CHECK-NEXT:    vmv1r.v v9, v7
964; CHECK-NEXT:    vmv1r.v v10, v7
965; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
966; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1, v0.t
967; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
968; CHECK-NEXT:    ret
969entry:
970  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i64 %offset, i64 %vl)
971  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
972  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
973  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
974  ret <vscale x 4 x i8> %3
975}
976
977declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8*, i64, i64)
978declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
979
980define <vscale x 4 x i8> @test_vlsseg5_nxv4i8(i8* %base, i64 %offset, i64 %vl) {
981; CHECK-LABEL: test_vlsseg5_nxv4i8:
982; CHECK:       # %bb.0: # %entry
983; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
984; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
985; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
986; CHECK-NEXT:    ret
987entry:
988  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i64 %offset, i64 %vl)
989  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
990  ret <vscale x 4 x i8> %1
991}
992
993define <vscale x 4 x i8> @test_vlsseg5_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
994; CHECK-LABEL: test_vlsseg5_mask_nxv4i8:
995; CHECK:       # %bb.0: # %entry
996; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
997; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
998; CHECK-NEXT:    vmv1r.v v8, v7
999; CHECK-NEXT:    vmv1r.v v9, v7
1000; CHECK-NEXT:    vmv1r.v v10, v7
1001; CHECK-NEXT:    vmv1r.v v11, v7
1002; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
1003; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1, v0.t
1004; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
1005; CHECK-NEXT:    ret
1006entry:
1007  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i64 %offset, i64 %vl)
1008  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
1009  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
1010  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
1011  ret <vscale x 4 x i8> %3
1012}
1013
1014declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8*, i64, i64)
1015declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
1016
1017define <vscale x 4 x i8> @test_vlsseg6_nxv4i8(i8* %base, i64 %offset, i64 %vl) {
1018; CHECK-LABEL: test_vlsseg6_nxv4i8:
1019; CHECK:       # %bb.0: # %entry
1020; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
1021; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
1022; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
1023; CHECK-NEXT:    ret
1024entry:
1025  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i64 %offset, i64 %vl)
1026  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
1027  ret <vscale x 4 x i8> %1
1028}
1029
1030define <vscale x 4 x i8> @test_vlsseg6_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1031; CHECK-LABEL: test_vlsseg6_mask_nxv4i8:
1032; CHECK:       # %bb.0: # %entry
1033; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
1034; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
1035; CHECK-NEXT:    vmv1r.v v8, v7
1036; CHECK-NEXT:    vmv1r.v v9, v7
1037; CHECK-NEXT:    vmv1r.v v10, v7
1038; CHECK-NEXT:    vmv1r.v v11, v7
1039; CHECK-NEXT:    vmv1r.v v12, v7
1040; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
1041; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1, v0.t
1042; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
1043; CHECK-NEXT:    ret
1044entry:
1045  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i64 %offset, i64 %vl)
1046  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
1047  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
1048  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
1049  ret <vscale x 4 x i8> %3
1050}
1051
1052declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8*, i64, i64)
1053declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
1054
1055define <vscale x 4 x i8> @test_vlsseg7_nxv4i8(i8* %base, i64 %offset, i64 %vl) {
1056; CHECK-LABEL: test_vlsseg7_nxv4i8:
1057; CHECK:       # %bb.0: # %entry
1058; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
1059; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
1060; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
1061; CHECK-NEXT:    ret
1062entry:
1063  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i64 %offset, i64 %vl)
1064  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
1065  ret <vscale x 4 x i8> %1
1066}
1067
1068define <vscale x 4 x i8> @test_vlsseg7_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1069; CHECK-LABEL: test_vlsseg7_mask_nxv4i8:
1070; CHECK:       # %bb.0: # %entry
1071; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
1072; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
1073; CHECK-NEXT:    vmv1r.v v8, v7
1074; CHECK-NEXT:    vmv1r.v v9, v7
1075; CHECK-NEXT:    vmv1r.v v10, v7
1076; CHECK-NEXT:    vmv1r.v v11, v7
1077; CHECK-NEXT:    vmv1r.v v12, v7
1078; CHECK-NEXT:    vmv1r.v v13, v7
1079; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
1080; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1, v0.t
1081; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
1082; CHECK-NEXT:    ret
1083entry:
1084  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i64 %offset, i64 %vl)
1085  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
1086  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
1087  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
1088  ret <vscale x 4 x i8> %3
1089}
1090
1091declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8*, i64, i64)
1092declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
1093
1094define <vscale x 4 x i8> @test_vlsseg8_nxv4i8(i8* %base, i64 %offset, i64 %vl) {
1095; CHECK-LABEL: test_vlsseg8_nxv4i8:
1096; CHECK:       # %bb.0: # %entry
1097; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
1098; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
1099; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
1100; CHECK-NEXT:    ret
1101entry:
1102  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i64 %offset, i64 %vl)
1103  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
1104  ret <vscale x 4 x i8> %1
1105}
1106
1107define <vscale x 4 x i8> @test_vlsseg8_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1108; CHECK-LABEL: test_vlsseg8_mask_nxv4i8:
1109; CHECK:       # %bb.0: # %entry
1110; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
1111; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
1112; CHECK-NEXT:    vmv1r.v v8, v7
1113; CHECK-NEXT:    vmv1r.v v9, v7
1114; CHECK-NEXT:    vmv1r.v v10, v7
1115; CHECK-NEXT:    vmv1r.v v11, v7
1116; CHECK-NEXT:    vmv1r.v v12, v7
1117; CHECK-NEXT:    vmv1r.v v13, v7
1118; CHECK-NEXT:    vmv1r.v v14, v7
1119; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
1120; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1, v0.t
1121; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
1122; CHECK-NEXT:    ret
1123entry:
1124  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i64 %offset, i64 %vl)
1125  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
1126  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
1127  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
1128  ret <vscale x 4 x i8> %3
1129}
1130
1131declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16*, i64, i64)
1132declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
1133
1134define <vscale x 1 x i16> @test_vlsseg2_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
1135; CHECK-LABEL: test_vlsseg2_nxv1i16:
1136; CHECK:       # %bb.0: # %entry
1137; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
1138; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
1139; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
1140; CHECK-NEXT:    ret
1141entry:
1142  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i64 %offset, i64 %vl)
1143  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
1144  ret <vscale x 1 x i16> %1
1145}
1146
1147define <vscale x 1 x i16> @test_vlsseg2_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1148; CHECK-LABEL: test_vlsseg2_mask_nxv1i16:
1149; CHECK:       # %bb.0: # %entry
1150; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
1151; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
1152; CHECK-NEXT:    vmv1r.v v8, v7
1153; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
1154; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
1155; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
1156; CHECK-NEXT:    ret
1157entry:
1158  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i64 %offset, i64 %vl)
1159  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
1160  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
1161  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
1162  ret <vscale x 1 x i16> %3
1163}
1164
1165declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16*, i64, i64)
1166declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
1167
1168define <vscale x 1 x i16> @test_vlsseg3_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
1169; CHECK-LABEL: test_vlsseg3_nxv1i16:
1170; CHECK:       # %bb.0: # %entry
1171; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
1172; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
1173; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
1174; CHECK-NEXT:    ret
1175entry:
1176  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i64 %offset, i64 %vl)
1177  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
1178  ret <vscale x 1 x i16> %1
1179}
1180
1181define <vscale x 1 x i16> @test_vlsseg3_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1182; CHECK-LABEL: test_vlsseg3_mask_nxv1i16:
1183; CHECK:       # %bb.0: # %entry
1184; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
1185; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
1186; CHECK-NEXT:    vmv1r.v v8, v7
1187; CHECK-NEXT:    vmv1r.v v9, v7
1188; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
1189; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
1190; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
1191; CHECK-NEXT:    ret
1192entry:
1193  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i64 %offset, i64 %vl)
1194  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
1195  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
1196  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
1197  ret <vscale x 1 x i16> %3
1198}
1199
1200declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16*, i64, i64)
1201declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
1202
1203define <vscale x 1 x i16> @test_vlsseg4_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
1204; CHECK-LABEL: test_vlsseg4_nxv1i16:
1205; CHECK:       # %bb.0: # %entry
1206; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
1207; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
1208; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
1209; CHECK-NEXT:    ret
1210entry:
1211  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i64 %offset, i64 %vl)
1212  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
1213  ret <vscale x 1 x i16> %1
1214}
1215
1216define <vscale x 1 x i16> @test_vlsseg4_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1217; CHECK-LABEL: test_vlsseg4_mask_nxv1i16:
1218; CHECK:       # %bb.0: # %entry
1219; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
1220; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
1221; CHECK-NEXT:    vmv1r.v v8, v7
1222; CHECK-NEXT:    vmv1r.v v9, v7
1223; CHECK-NEXT:    vmv1r.v v10, v7
1224; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
1225; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
1226; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
1227; CHECK-NEXT:    ret
1228entry:
1229  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i64 %offset, i64 %vl)
1230  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
1231  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
1232  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
1233  ret <vscale x 1 x i16> %3
1234}
1235
1236declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16*, i64, i64)
1237declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
1238
1239define <vscale x 1 x i16> @test_vlsseg5_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
1240; CHECK-LABEL: test_vlsseg5_nxv1i16:
1241; CHECK:       # %bb.0: # %entry
1242; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
1243; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
1244; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
1245; CHECK-NEXT:    ret
1246entry:
1247  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i64 %offset, i64 %vl)
1248  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
1249  ret <vscale x 1 x i16> %1
1250}
1251
1252define <vscale x 1 x i16> @test_vlsseg5_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1253; CHECK-LABEL: test_vlsseg5_mask_nxv1i16:
1254; CHECK:       # %bb.0: # %entry
1255; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
1256; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
1257; CHECK-NEXT:    vmv1r.v v8, v7
1258; CHECK-NEXT:    vmv1r.v v9, v7
1259; CHECK-NEXT:    vmv1r.v v10, v7
1260; CHECK-NEXT:    vmv1r.v v11, v7
1261; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
1262; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
1263; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
1264; CHECK-NEXT:    ret
1265entry:
1266  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i64 %offset, i64 %vl)
1267  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
1268  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
1269  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
1270  ret <vscale x 1 x i16> %3
1271}
1272
1273declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16*, i64, i64)
1274declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
1275
1276define <vscale x 1 x i16> @test_vlsseg6_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
1277; CHECK-LABEL: test_vlsseg6_nxv1i16:
1278; CHECK:       # %bb.0: # %entry
1279; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
1280; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
1281; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
1282; CHECK-NEXT:    ret
1283entry:
1284  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i64 %offset, i64 %vl)
1285  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
1286  ret <vscale x 1 x i16> %1
1287}
1288
1289define <vscale x 1 x i16> @test_vlsseg6_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1290; CHECK-LABEL: test_vlsseg6_mask_nxv1i16:
1291; CHECK:       # %bb.0: # %entry
1292; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
1293; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
1294; CHECK-NEXT:    vmv1r.v v8, v7
1295; CHECK-NEXT:    vmv1r.v v9, v7
1296; CHECK-NEXT:    vmv1r.v v10, v7
1297; CHECK-NEXT:    vmv1r.v v11, v7
1298; CHECK-NEXT:    vmv1r.v v12, v7
1299; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
1300; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
1301; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
1302; CHECK-NEXT:    ret
1303entry:
1304  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i64 %offset, i64 %vl)
1305  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
1306  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
1307  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
1308  ret <vscale x 1 x i16> %3
1309}
1310
1311declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16*, i64, i64)
1312declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
1313
1314define <vscale x 1 x i16> @test_vlsseg7_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
1315; CHECK-LABEL: test_vlsseg7_nxv1i16:
1316; CHECK:       # %bb.0: # %entry
1317; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
1318; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
1319; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
1320; CHECK-NEXT:    ret
1321entry:
1322  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i64 %offset, i64 %vl)
1323  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
1324  ret <vscale x 1 x i16> %1
1325}
1326
1327define <vscale x 1 x i16> @test_vlsseg7_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1328; CHECK-LABEL: test_vlsseg7_mask_nxv1i16:
1329; CHECK:       # %bb.0: # %entry
1330; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
1331; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
1332; CHECK-NEXT:    vmv1r.v v8, v7
1333; CHECK-NEXT:    vmv1r.v v9, v7
1334; CHECK-NEXT:    vmv1r.v v10, v7
1335; CHECK-NEXT:    vmv1r.v v11, v7
1336; CHECK-NEXT:    vmv1r.v v12, v7
1337; CHECK-NEXT:    vmv1r.v v13, v7
1338; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
1339; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
1340; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
1341; CHECK-NEXT:    ret
1342entry:
1343  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i64 %offset, i64 %vl)
1344  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
1345  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
1346  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
1347  ret <vscale x 1 x i16> %3
1348}
1349
1350declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16*, i64, i64)
1351declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
1352
1353define <vscale x 1 x i16> @test_vlsseg8_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
1354; CHECK-LABEL: test_vlsseg8_nxv1i16:
1355; CHECK:       # %bb.0: # %entry
1356; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
1357; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
1358; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
1359; CHECK-NEXT:    ret
1360entry:
1361  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i64 %offset, i64 %vl)
1362  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
1363  ret <vscale x 1 x i16> %1
1364}
1365
1366define <vscale x 1 x i16> @test_vlsseg8_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1367; CHECK-LABEL: test_vlsseg8_mask_nxv1i16:
1368; CHECK:       # %bb.0: # %entry
1369; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
1370; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
1371; CHECK-NEXT:    vmv1r.v v8, v7
1372; CHECK-NEXT:    vmv1r.v v9, v7
1373; CHECK-NEXT:    vmv1r.v v10, v7
1374; CHECK-NEXT:    vmv1r.v v11, v7
1375; CHECK-NEXT:    vmv1r.v v12, v7
1376; CHECK-NEXT:    vmv1r.v v13, v7
1377; CHECK-NEXT:    vmv1r.v v14, v7
1378; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
1379; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
1380; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
1381; CHECK-NEXT:    ret
1382entry:
1383  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i64 %offset, i64 %vl)
1384  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
1385  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
1386  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
1387  ret <vscale x 1 x i16> %3
1388}
1389
1390declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32*, i64, i64)
1391declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
1392
1393define <vscale x 2 x i32> @test_vlsseg2_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
1394; CHECK-LABEL: test_vlsseg2_nxv2i32:
1395; CHECK:       # %bb.0: # %entry
1396; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
1397; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
1398; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
1399; CHECK-NEXT:    ret
1400entry:
1401  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i64 %offset, i64 %vl)
1402  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1403  ret <vscale x 2 x i32> %1
1404}
1405
1406define <vscale x 2 x i32> @test_vlsseg2_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1407; CHECK-LABEL: test_vlsseg2_mask_nxv2i32:
1408; CHECK:       # %bb.0: # %entry
1409; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
1410; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
1411; CHECK-NEXT:    vmv1r.v v8, v7
1412; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
1413; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1, v0.t
1414; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
1415; CHECK-NEXT:    ret
1416entry:
1417  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i64 %offset, i64 %vl)
1418  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
1419  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
1420  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
1421  ret <vscale x 2 x i32> %3
1422}
1423
1424declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32*, i64, i64)
1425declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
1426
1427define <vscale x 2 x i32> @test_vlsseg3_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
1428; CHECK-LABEL: test_vlsseg3_nxv2i32:
1429; CHECK:       # %bb.0: # %entry
1430; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
1431; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
1432; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
1433; CHECK-NEXT:    ret
1434entry:
1435  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i64 %offset, i64 %vl)
1436  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1437  ret <vscale x 2 x i32> %1
1438}
1439
1440define <vscale x 2 x i32> @test_vlsseg3_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1441; CHECK-LABEL: test_vlsseg3_mask_nxv2i32:
1442; CHECK:       # %bb.0: # %entry
1443; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
1444; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
1445; CHECK-NEXT:    vmv1r.v v8, v7
1446; CHECK-NEXT:    vmv1r.v v9, v7
1447; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
1448; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1, v0.t
1449; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
1450; CHECK-NEXT:    ret
1451entry:
1452  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i64 %offset, i64 %vl)
1453  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
1454  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
1455  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
1456  ret <vscale x 2 x i32> %3
1457}
1458
1459declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32*, i64, i64)
1460declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
1461
1462define <vscale x 2 x i32> @test_vlsseg4_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
1463; CHECK-LABEL: test_vlsseg4_nxv2i32:
1464; CHECK:       # %bb.0: # %entry
1465; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
1466; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
1467; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
1468; CHECK-NEXT:    ret
1469entry:
1470  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i64 %offset, i64 %vl)
1471  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1472  ret <vscale x 2 x i32> %1
1473}
1474
1475define <vscale x 2 x i32> @test_vlsseg4_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1476; CHECK-LABEL: test_vlsseg4_mask_nxv2i32:
1477; CHECK:       # %bb.0: # %entry
1478; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
1479; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
1480; CHECK-NEXT:    vmv1r.v v8, v7
1481; CHECK-NEXT:    vmv1r.v v9, v7
1482; CHECK-NEXT:    vmv1r.v v10, v7
1483; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
1484; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1, v0.t
1485; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
1486; CHECK-NEXT:    ret
1487entry:
1488  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i64 %offset, i64 %vl)
1489  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
1490  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
1491  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
1492  ret <vscale x 2 x i32> %3
1493}
1494
1495declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32*, i64, i64)
1496declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
1497
1498define <vscale x 2 x i32> @test_vlsseg5_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
1499; CHECK-LABEL: test_vlsseg5_nxv2i32:
1500; CHECK:       # %bb.0: # %entry
1501; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
1502; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
1503; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
1504; CHECK-NEXT:    ret
1505entry:
1506  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i64 %offset, i64 %vl)
1507  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1508  ret <vscale x 2 x i32> %1
1509}
1510
1511define <vscale x 2 x i32> @test_vlsseg5_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1512; CHECK-LABEL: test_vlsseg5_mask_nxv2i32:
1513; CHECK:       # %bb.0: # %entry
1514; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
1515; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
1516; CHECK-NEXT:    vmv1r.v v8, v7
1517; CHECK-NEXT:    vmv1r.v v9, v7
1518; CHECK-NEXT:    vmv1r.v v10, v7
1519; CHECK-NEXT:    vmv1r.v v11, v7
1520; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
1521; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1, v0.t
1522; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
1523; CHECK-NEXT:    ret
1524entry:
1525  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i64 %offset, i64 %vl)
1526  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
1527  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
1528  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
1529  ret <vscale x 2 x i32> %3
1530}
1531
1532declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32*, i64, i64)
1533declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
1534
1535define <vscale x 2 x i32> @test_vlsseg6_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
1536; CHECK-LABEL: test_vlsseg6_nxv2i32:
1537; CHECK:       # %bb.0: # %entry
1538; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
1539; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
1540; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
1541; CHECK-NEXT:    ret
1542entry:
1543  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i64 %offset, i64 %vl)
1544  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1545  ret <vscale x 2 x i32> %1
1546}
1547
1548define <vscale x 2 x i32> @test_vlsseg6_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1549; CHECK-LABEL: test_vlsseg6_mask_nxv2i32:
1550; CHECK:       # %bb.0: # %entry
1551; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
1552; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
1553; CHECK-NEXT:    vmv1r.v v8, v7
1554; CHECK-NEXT:    vmv1r.v v9, v7
1555; CHECK-NEXT:    vmv1r.v v10, v7
1556; CHECK-NEXT:    vmv1r.v v11, v7
1557; CHECK-NEXT:    vmv1r.v v12, v7
1558; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
1559; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1, v0.t
1560; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
1561; CHECK-NEXT:    ret
1562entry:
1563  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i64 %offset, i64 %vl)
1564  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
1565  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
1566  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
1567  ret <vscale x 2 x i32> %3
1568}
1569
1570declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32*, i64, i64)
1571declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
1572
1573define <vscale x 2 x i32> @test_vlsseg7_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
1574; CHECK-LABEL: test_vlsseg7_nxv2i32:
1575; CHECK:       # %bb.0: # %entry
1576; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
1577; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
1578; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
1579; CHECK-NEXT:    ret
1580entry:
1581  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i64 %offset, i64 %vl)
1582  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1583  ret <vscale x 2 x i32> %1
1584}
1585
1586define <vscale x 2 x i32> @test_vlsseg7_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1587; CHECK-LABEL: test_vlsseg7_mask_nxv2i32:
1588; CHECK:       # %bb.0: # %entry
1589; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
1590; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
1591; CHECK-NEXT:    vmv1r.v v8, v7
1592; CHECK-NEXT:    vmv1r.v v9, v7
1593; CHECK-NEXT:    vmv1r.v v10, v7
1594; CHECK-NEXT:    vmv1r.v v11, v7
1595; CHECK-NEXT:    vmv1r.v v12, v7
1596; CHECK-NEXT:    vmv1r.v v13, v7
1597; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
1598; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1, v0.t
1599; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
1600; CHECK-NEXT:    ret
1601entry:
1602  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i64 %offset, i64 %vl)
1603  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
1604  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
1605  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
1606  ret <vscale x 2 x i32> %3
1607}
1608
1609declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32*, i64, i64)
1610declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
1611
1612define <vscale x 2 x i32> @test_vlsseg8_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
1613; CHECK-LABEL: test_vlsseg8_nxv2i32:
1614; CHECK:       # %bb.0: # %entry
1615; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
1616; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
1617; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
1618; CHECK-NEXT:    ret
1619entry:
1620  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i64 %offset, i64 %vl)
1621  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1622  ret <vscale x 2 x i32> %1
1623}
1624
1625define <vscale x 2 x i32> @test_vlsseg8_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1626; CHECK-LABEL: test_vlsseg8_mask_nxv2i32:
1627; CHECK:       # %bb.0: # %entry
1628; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
1629; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
1630; CHECK-NEXT:    vmv1r.v v8, v7
1631; CHECK-NEXT:    vmv1r.v v9, v7
1632; CHECK-NEXT:    vmv1r.v v10, v7
1633; CHECK-NEXT:    vmv1r.v v11, v7
1634; CHECK-NEXT:    vmv1r.v v12, v7
1635; CHECK-NEXT:    vmv1r.v v13, v7
1636; CHECK-NEXT:    vmv1r.v v14, v7
1637; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
1638; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1, v0.t
1639; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
1640; CHECK-NEXT:    ret
1641entry:
1642  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i64 %offset, i64 %vl)
1643  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
1644  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
1645  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
1646  ret <vscale x 2 x i32> %3
1647}
1648
1649declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8*, i64, i64)
1650declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
1651
1652define <vscale x 8 x i8> @test_vlsseg2_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
1653; CHECK-LABEL: test_vlsseg2_nxv8i8:
1654; CHECK:       # %bb.0: # %entry
1655; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
1656; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
1657; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
1658; CHECK-NEXT:    ret
1659entry:
1660  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i64 %offset, i64 %vl)
1661  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
1662  ret <vscale x 8 x i8> %1
1663}
1664
1665define <vscale x 8 x i8> @test_vlsseg2_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
1666; CHECK-LABEL: test_vlsseg2_mask_nxv8i8:
1667; CHECK:       # %bb.0: # %entry
1668; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
1669; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
1670; CHECK-NEXT:    vmv1r.v v8, v7
1671; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
1672; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1, v0.t
1673; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
1674; CHECK-NEXT:    ret
1675entry:
1676  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i64 %offset, i64 %vl)
1677  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
1678  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
1679  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
1680  ret <vscale x 8 x i8> %3
1681}
1682
1683declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8*, i64, i64)
1684declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
1685
1686define <vscale x 8 x i8> @test_vlsseg3_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
1687; CHECK-LABEL: test_vlsseg3_nxv8i8:
1688; CHECK:       # %bb.0: # %entry
1689; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
1690; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
1691; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
1692; CHECK-NEXT:    ret
1693entry:
1694  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i64 %offset, i64 %vl)
1695  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
1696  ret <vscale x 8 x i8> %1
1697}
1698
1699define <vscale x 8 x i8> @test_vlsseg3_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
1700; CHECK-LABEL: test_vlsseg3_mask_nxv8i8:
1701; CHECK:       # %bb.0: # %entry
1702; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
1703; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
1704; CHECK-NEXT:    vmv1r.v v8, v7
1705; CHECK-NEXT:    vmv1r.v v9, v7
1706; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
1707; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1, v0.t
1708; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
1709; CHECK-NEXT:    ret
1710entry:
1711  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i64 %offset, i64 %vl)
1712  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
1713  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
1714  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
1715  ret <vscale x 8 x i8> %3
1716}
1717
1718declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8*, i64, i64)
1719declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
1720
1721define <vscale x 8 x i8> @test_vlsseg4_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
1722; CHECK-LABEL: test_vlsseg4_nxv8i8:
1723; CHECK:       # %bb.0: # %entry
1724; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
1725; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
1726; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
1727; CHECK-NEXT:    ret
1728entry:
1729  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i64 %offset, i64 %vl)
1730  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
1731  ret <vscale x 8 x i8> %1
1732}
1733
1734define <vscale x 8 x i8> @test_vlsseg4_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
1735; CHECK-LABEL: test_vlsseg4_mask_nxv8i8:
1736; CHECK:       # %bb.0: # %entry
1737; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
1738; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
1739; CHECK-NEXT:    vmv1r.v v8, v7
1740; CHECK-NEXT:    vmv1r.v v9, v7
1741; CHECK-NEXT:    vmv1r.v v10, v7
1742; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
1743; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1, v0.t
1744; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
1745; CHECK-NEXT:    ret
1746entry:
1747  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i64 %offset, i64 %vl)
1748  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
1749  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
1750  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
1751  ret <vscale x 8 x i8> %3
1752}
1753
1754declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8*, i64, i64)
1755declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
1756
1757define <vscale x 8 x i8> @test_vlsseg5_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
1758; CHECK-LABEL: test_vlsseg5_nxv8i8:
1759; CHECK:       # %bb.0: # %entry
1760; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
1761; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
1762; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
1763; CHECK-NEXT:    ret
1764entry:
1765  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i64 %offset, i64 %vl)
1766  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
1767  ret <vscale x 8 x i8> %1
1768}
1769
1770define <vscale x 8 x i8> @test_vlsseg5_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
1771; CHECK-LABEL: test_vlsseg5_mask_nxv8i8:
1772; CHECK:       # %bb.0: # %entry
1773; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
1774; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
1775; CHECK-NEXT:    vmv1r.v v8, v7
1776; CHECK-NEXT:    vmv1r.v v9, v7
1777; CHECK-NEXT:    vmv1r.v v10, v7
1778; CHECK-NEXT:    vmv1r.v v11, v7
1779; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
1780; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1, v0.t
1781; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
1782; CHECK-NEXT:    ret
1783entry:
1784  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i64 %offset, i64 %vl)
1785  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
1786  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
1787  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
1788  ret <vscale x 8 x i8> %3
1789}
1790
1791declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8*, i64, i64)
1792declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
1793
1794define <vscale x 8 x i8> @test_vlsseg6_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
1795; CHECK-LABEL: test_vlsseg6_nxv8i8:
1796; CHECK:       # %bb.0: # %entry
1797; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
1798; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
1799; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
1800; CHECK-NEXT:    ret
1801entry:
1802  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i64 %offset, i64 %vl)
1803  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
1804  ret <vscale x 8 x i8> %1
1805}
1806
1807define <vscale x 8 x i8> @test_vlsseg6_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
1808; CHECK-LABEL: test_vlsseg6_mask_nxv8i8:
1809; CHECK:       # %bb.0: # %entry
1810; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
1811; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
1812; CHECK-NEXT:    vmv1r.v v8, v7
1813; CHECK-NEXT:    vmv1r.v v9, v7
1814; CHECK-NEXT:    vmv1r.v v10, v7
1815; CHECK-NEXT:    vmv1r.v v11, v7
1816; CHECK-NEXT:    vmv1r.v v12, v7
1817; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
1818; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1, v0.t
1819; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
1820; CHECK-NEXT:    ret
1821entry:
1822  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i64 %offset, i64 %vl)
1823  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
1824  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
1825  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
1826  ret <vscale x 8 x i8> %3
1827}
1828
1829declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8*, i64, i64)
1830declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
1831
1832define <vscale x 8 x i8> @test_vlsseg7_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
1833; CHECK-LABEL: test_vlsseg7_nxv8i8:
1834; CHECK:       # %bb.0: # %entry
1835; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
1836; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
1837; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
1838; CHECK-NEXT:    ret
1839entry:
1840  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i64 %offset, i64 %vl)
1841  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
1842  ret <vscale x 8 x i8> %1
1843}
1844
1845define <vscale x 8 x i8> @test_vlsseg7_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
1846; CHECK-LABEL: test_vlsseg7_mask_nxv8i8:
1847; CHECK:       # %bb.0: # %entry
1848; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
1849; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
1850; CHECK-NEXT:    vmv1r.v v8, v7
1851; CHECK-NEXT:    vmv1r.v v9, v7
1852; CHECK-NEXT:    vmv1r.v v10, v7
1853; CHECK-NEXT:    vmv1r.v v11, v7
1854; CHECK-NEXT:    vmv1r.v v12, v7
1855; CHECK-NEXT:    vmv1r.v v13, v7
1856; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
1857; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1, v0.t
1858; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
1859; CHECK-NEXT:    ret
1860entry:
1861  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i64 %offset, i64 %vl)
1862  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
1863  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
1864  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
1865  ret <vscale x 8 x i8> %3
1866}
1867
1868declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8*, i64, i64)
1869declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
1870
1871define <vscale x 8 x i8> @test_vlsseg8_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
1872; CHECK-LABEL: test_vlsseg8_nxv8i8:
1873; CHECK:       # %bb.0: # %entry
1874; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
1875; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
1876; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
1877; CHECK-NEXT:    ret
1878entry:
1879  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i64 %offset, i64 %vl)
1880  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
1881  ret <vscale x 8 x i8> %1
1882}
1883
1884define <vscale x 8 x i8> @test_vlsseg8_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
1885; CHECK-LABEL: test_vlsseg8_mask_nxv8i8:
1886; CHECK:       # %bb.0: # %entry
1887; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
1888; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
1889; CHECK-NEXT:    vmv1r.v v8, v7
1890; CHECK-NEXT:    vmv1r.v v9, v7
1891; CHECK-NEXT:    vmv1r.v v10, v7
1892; CHECK-NEXT:    vmv1r.v v11, v7
1893; CHECK-NEXT:    vmv1r.v v12, v7
1894; CHECK-NEXT:    vmv1r.v v13, v7
1895; CHECK-NEXT:    vmv1r.v v14, v7
1896; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
1897; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1, v0.t
1898; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
1899; CHECK-NEXT:    ret
1900entry:
1901  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i64 %offset, i64 %vl)
1902  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
1903  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
1904  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
1905  ret <vscale x 8 x i8> %3
1906}
1907
1908declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(i64*, i64, i64)
1909declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, i64, <vscale x 4 x i1>, i64)
1910
1911define <vscale x 4 x i64> @test_vlsseg2_nxv4i64(i64* %base, i64 %offset, i64 %vl) {
1912; CHECK-LABEL: test_vlsseg2_nxv4i64:
1913; CHECK:       # %bb.0: # %entry
1914; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
1915; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1
1916; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
1917; CHECK-NEXT:    ret
1918entry:
1919  %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(i64* %base, i64 %offset, i64 %vl)
1920  %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 1
1921  ret <vscale x 4 x i64> %1
1922}
1923
1924define <vscale x 4 x i64> @test_vlsseg2_mask_nxv4i64(i64* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1925; CHECK-LABEL: test_vlsseg2_mask_nxv4i64:
1926; CHECK:       # %bb.0: # %entry
1927; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
1928; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1
1929; CHECK-NEXT:    vmv4r.v v8, v4
1930; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
1931; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1, v0.t
1932; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
1933; CHECK-NEXT:    ret
1934entry:
1935  %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(i64* %base, i64 %offset, i64 %vl)
1936  %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 0
1937  %2 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.mask.nxv4i64(<vscale x 4 x i64> %1,<vscale x 4 x i64> %1, i64* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
1938  %3 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %2, 1
1939  ret <vscale x 4 x i64> %3
1940}
1941
1942declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16*, i64, i64)
1943declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
1944
1945define <vscale x 4 x i16> @test_vlsseg2_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
1946; CHECK-LABEL: test_vlsseg2_nxv4i16:
1947; CHECK:       # %bb.0: # %entry
1948; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
1949; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
1950; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
1951; CHECK-NEXT:    ret
1952entry:
1953  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i64 %offset, i64 %vl)
1954  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1955  ret <vscale x 4 x i16> %1
1956}
1957
1958define <vscale x 4 x i16> @test_vlsseg2_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1959; CHECK-LABEL: test_vlsseg2_mask_nxv4i16:
1960; CHECK:       # %bb.0: # %entry
1961; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
1962; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
1963; CHECK-NEXT:    vmv1r.v v8, v7
1964; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
1965; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
1966; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
1967; CHECK-NEXT:    ret
1968entry:
1969  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i64 %offset, i64 %vl)
1970  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
1971  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
1972  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
1973  ret <vscale x 4 x i16> %3
1974}
1975
1976declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16*, i64, i64)
1977declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
1978
1979define <vscale x 4 x i16> @test_vlsseg3_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
1980; CHECK-LABEL: test_vlsseg3_nxv4i16:
1981; CHECK:       # %bb.0: # %entry
1982; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
1983; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
1984; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
1985; CHECK-NEXT:    ret
1986entry:
1987  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i64 %offset, i64 %vl)
1988  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1989  ret <vscale x 4 x i16> %1
1990}
1991
1992define <vscale x 4 x i16> @test_vlsseg3_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1993; CHECK-LABEL: test_vlsseg3_mask_nxv4i16:
1994; CHECK:       # %bb.0: # %entry
1995; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
1996; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
1997; CHECK-NEXT:    vmv1r.v v8, v7
1998; CHECK-NEXT:    vmv1r.v v9, v7
1999; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
2000; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
2001; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
2002; CHECK-NEXT:    ret
2003entry:
2004  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i64 %offset, i64 %vl)
2005  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
2006  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
2007  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
2008  ret <vscale x 4 x i16> %3
2009}
2010
2011declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16*, i64, i64)
2012declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
2013
2014define <vscale x 4 x i16> @test_vlsseg4_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
2015; CHECK-LABEL: test_vlsseg4_nxv4i16:
2016; CHECK:       # %bb.0: # %entry
2017; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
2018; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
2019; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
2020; CHECK-NEXT:    ret
2021entry:
2022  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i64 %offset, i64 %vl)
2023  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2024  ret <vscale x 4 x i16> %1
2025}
2026
2027define <vscale x 4 x i16> @test_vlsseg4_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2028; CHECK-LABEL: test_vlsseg4_mask_nxv4i16:
2029; CHECK:       # %bb.0: # %entry
2030; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
2031; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
2032; CHECK-NEXT:    vmv1r.v v8, v7
2033; CHECK-NEXT:    vmv1r.v v9, v7
2034; CHECK-NEXT:    vmv1r.v v10, v7
2035; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
2036; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
2037; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
2038; CHECK-NEXT:    ret
2039entry:
2040  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i64 %offset, i64 %vl)
2041  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
2042  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
2043  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
2044  ret <vscale x 4 x i16> %3
2045}
2046
2047declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16*, i64, i64)
2048declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
2049
2050define <vscale x 4 x i16> @test_vlsseg5_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
2051; CHECK-LABEL: test_vlsseg5_nxv4i16:
2052; CHECK:       # %bb.0: # %entry
2053; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
2054; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
2055; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
2056; CHECK-NEXT:    ret
2057entry:
2058  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i64 %offset, i64 %vl)
2059  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2060  ret <vscale x 4 x i16> %1
2061}
2062
2063define <vscale x 4 x i16> @test_vlsseg5_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2064; CHECK-LABEL: test_vlsseg5_mask_nxv4i16:
2065; CHECK:       # %bb.0: # %entry
2066; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
2067; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
2068; CHECK-NEXT:    vmv1r.v v8, v7
2069; CHECK-NEXT:    vmv1r.v v9, v7
2070; CHECK-NEXT:    vmv1r.v v10, v7
2071; CHECK-NEXT:    vmv1r.v v11, v7
2072; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
2073; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
2074; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
2075; CHECK-NEXT:    ret
2076entry:
2077  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i64 %offset, i64 %vl)
2078  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
2079  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
2080  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
2081  ret <vscale x 4 x i16> %3
2082}
2083
2084declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16*, i64, i64)
2085declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
2086
2087define <vscale x 4 x i16> @test_vlsseg6_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
2088; CHECK-LABEL: test_vlsseg6_nxv4i16:
2089; CHECK:       # %bb.0: # %entry
2090; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
2091; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
2092; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
2093; CHECK-NEXT:    ret
2094entry:
2095  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i64 %offset, i64 %vl)
2096  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2097  ret <vscale x 4 x i16> %1
2098}
2099
2100define <vscale x 4 x i16> @test_vlsseg6_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2101; CHECK-LABEL: test_vlsseg6_mask_nxv4i16:
2102; CHECK:       # %bb.0: # %entry
2103; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
2104; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
2105; CHECK-NEXT:    vmv1r.v v8, v7
2106; CHECK-NEXT:    vmv1r.v v9, v7
2107; CHECK-NEXT:    vmv1r.v v10, v7
2108; CHECK-NEXT:    vmv1r.v v11, v7
2109; CHECK-NEXT:    vmv1r.v v12, v7
2110; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
2111; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
2112; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
2113; CHECK-NEXT:    ret
2114entry:
2115  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i64 %offset, i64 %vl)
2116  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
2117  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
2118  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
2119  ret <vscale x 4 x i16> %3
2120}
2121
2122declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16*, i64, i64)
2123declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
2124
2125define <vscale x 4 x i16> @test_vlsseg7_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
2126; CHECK-LABEL: test_vlsseg7_nxv4i16:
2127; CHECK:       # %bb.0: # %entry
2128; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
2129; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
2130; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
2131; CHECK-NEXT:    ret
2132entry:
2133  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i64 %offset, i64 %vl)
2134  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2135  ret <vscale x 4 x i16> %1
2136}
2137
2138define <vscale x 4 x i16> @test_vlsseg7_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2139; CHECK-LABEL: test_vlsseg7_mask_nxv4i16:
2140; CHECK:       # %bb.0: # %entry
2141; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
2142; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
2143; CHECK-NEXT:    vmv1r.v v8, v7
2144; CHECK-NEXT:    vmv1r.v v9, v7
2145; CHECK-NEXT:    vmv1r.v v10, v7
2146; CHECK-NEXT:    vmv1r.v v11, v7
2147; CHECK-NEXT:    vmv1r.v v12, v7
2148; CHECK-NEXT:    vmv1r.v v13, v7
2149; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
2150; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
2151; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
2152; CHECK-NEXT:    ret
2153entry:
2154  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i64 %offset, i64 %vl)
2155  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
2156  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
2157  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
2158  ret <vscale x 4 x i16> %3
2159}
2160
2161declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16*, i64, i64)
2162declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
2163
2164define <vscale x 4 x i16> @test_vlsseg8_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
2165; CHECK-LABEL: test_vlsseg8_nxv4i16:
2166; CHECK:       # %bb.0: # %entry
2167; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
2168; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
2169; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
2170; CHECK-NEXT:    ret
2171entry:
2172  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i64 %offset, i64 %vl)
2173  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2174  ret <vscale x 4 x i16> %1
2175}
2176
2177define <vscale x 4 x i16> @test_vlsseg8_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2178; CHECK-LABEL: test_vlsseg8_mask_nxv4i16:
2179; CHECK:       # %bb.0: # %entry
2180; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
2181; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
2182; CHECK-NEXT:    vmv1r.v v8, v7
2183; CHECK-NEXT:    vmv1r.v v9, v7
2184; CHECK-NEXT:    vmv1r.v v10, v7
2185; CHECK-NEXT:    vmv1r.v v11, v7
2186; CHECK-NEXT:    vmv1r.v v12, v7
2187; CHECK-NEXT:    vmv1r.v v13, v7
2188; CHECK-NEXT:    vmv1r.v v14, v7
2189; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
2190; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
2191; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
2192; CHECK-NEXT:    ret
2193entry:
2194  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i64 %offset, i64 %vl)
2195  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
2196  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
2197  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
2198  ret <vscale x 4 x i16> %3
2199}
2200
2201declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8*, i64, i64)
2202declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
2203
2204define <vscale x 1 x i8> @test_vlsseg2_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
2205; CHECK-LABEL: test_vlsseg2_nxv1i8:
2206; CHECK:       # %bb.0: # %entry
2207; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
2208; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
2209; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
2210; CHECK-NEXT:    ret
2211entry:
2212  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i64 %offset, i64 %vl)
2213  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
2214  ret <vscale x 1 x i8> %1
2215}
2216
2217define <vscale x 1 x i8> @test_vlsseg2_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2218; CHECK-LABEL: test_vlsseg2_mask_nxv1i8:
2219; CHECK:       # %bb.0: # %entry
2220; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
2221; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
2222; CHECK-NEXT:    vmv1r.v v8, v7
2223; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
2224; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1, v0.t
2225; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
2226; CHECK-NEXT:    ret
2227entry:
2228  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i64 %offset, i64 %vl)
2229  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
2230  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
2231  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
2232  ret <vscale x 1 x i8> %3
2233}
2234
2235declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8*, i64, i64)
2236declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
2237
2238define <vscale x 1 x i8> @test_vlsseg3_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
2239; CHECK-LABEL: test_vlsseg3_nxv1i8:
2240; CHECK:       # %bb.0: # %entry
2241; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
2242; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
2243; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
2244; CHECK-NEXT:    ret
2245entry:
2246  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i64 %offset, i64 %vl)
2247  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
2248  ret <vscale x 1 x i8> %1
2249}
2250
2251define <vscale x 1 x i8> @test_vlsseg3_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2252; CHECK-LABEL: test_vlsseg3_mask_nxv1i8:
2253; CHECK:       # %bb.0: # %entry
2254; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
2255; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
2256; CHECK-NEXT:    vmv1r.v v8, v7
2257; CHECK-NEXT:    vmv1r.v v9, v7
2258; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
2259; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1, v0.t
2260; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
2261; CHECK-NEXT:    ret
2262entry:
2263  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i64 %offset, i64 %vl)
2264  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
2265  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
2266  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
2267  ret <vscale x 1 x i8> %3
2268}
2269
2270declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8*, i64, i64)
2271declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
2272
2273define <vscale x 1 x i8> @test_vlsseg4_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
2274; CHECK-LABEL: test_vlsseg4_nxv1i8:
2275; CHECK:       # %bb.0: # %entry
2276; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
2277; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
2278; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
2279; CHECK-NEXT:    ret
2280entry:
2281  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i64 %offset, i64 %vl)
2282  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
2283  ret <vscale x 1 x i8> %1
2284}
2285
2286define <vscale x 1 x i8> @test_vlsseg4_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2287; CHECK-LABEL: test_vlsseg4_mask_nxv1i8:
2288; CHECK:       # %bb.0: # %entry
2289; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
2290; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
2291; CHECK-NEXT:    vmv1r.v v8, v7
2292; CHECK-NEXT:    vmv1r.v v9, v7
2293; CHECK-NEXT:    vmv1r.v v10, v7
2294; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
2295; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1, v0.t
2296; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
2297; CHECK-NEXT:    ret
2298entry:
2299  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i64 %offset, i64 %vl)
2300  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
2301  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
2302  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
2303  ret <vscale x 1 x i8> %3
2304}
2305
2306declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8*, i64, i64)
2307declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
2308
2309define <vscale x 1 x i8> @test_vlsseg5_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
2310; CHECK-LABEL: test_vlsseg5_nxv1i8:
2311; CHECK:       # %bb.0: # %entry
2312; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
2313; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
2314; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
2315; CHECK-NEXT:    ret
2316entry:
2317  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i64 %offset, i64 %vl)
2318  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
2319  ret <vscale x 1 x i8> %1
2320}
2321
2322define <vscale x 1 x i8> @test_vlsseg5_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2323; CHECK-LABEL: test_vlsseg5_mask_nxv1i8:
2324; CHECK:       # %bb.0: # %entry
2325; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
2326; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
2327; CHECK-NEXT:    vmv1r.v v8, v7
2328; CHECK-NEXT:    vmv1r.v v9, v7
2329; CHECK-NEXT:    vmv1r.v v10, v7
2330; CHECK-NEXT:    vmv1r.v v11, v7
2331; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
2332; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1, v0.t
2333; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
2334; CHECK-NEXT:    ret
2335entry:
2336  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i64 %offset, i64 %vl)
2337  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
2338  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
2339  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
2340  ret <vscale x 1 x i8> %3
2341}
2342
2343declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8*, i64, i64)
2344declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
2345
2346define <vscale x 1 x i8> @test_vlsseg6_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
2347; CHECK-LABEL: test_vlsseg6_nxv1i8:
2348; CHECK:       # %bb.0: # %entry
2349; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
2350; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
2351; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
2352; CHECK-NEXT:    ret
2353entry:
2354  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i64 %offset, i64 %vl)
2355  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
2356  ret <vscale x 1 x i8> %1
2357}
2358
2359define <vscale x 1 x i8> @test_vlsseg6_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2360; CHECK-LABEL: test_vlsseg6_mask_nxv1i8:
2361; CHECK:       # %bb.0: # %entry
2362; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
2363; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
2364; CHECK-NEXT:    vmv1r.v v8, v7
2365; CHECK-NEXT:    vmv1r.v v9, v7
2366; CHECK-NEXT:    vmv1r.v v10, v7
2367; CHECK-NEXT:    vmv1r.v v11, v7
2368; CHECK-NEXT:    vmv1r.v v12, v7
2369; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
2370; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1, v0.t
2371; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
2372; CHECK-NEXT:    ret
2373entry:
2374  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i64 %offset, i64 %vl)
2375  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
2376  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
2377  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
2378  ret <vscale x 1 x i8> %3
2379}
2380
2381declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8*, i64, i64)
2382declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
2383
2384define <vscale x 1 x i8> @test_vlsseg7_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
2385; CHECK-LABEL: test_vlsseg7_nxv1i8:
2386; CHECK:       # %bb.0: # %entry
2387; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
2388; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
2389; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
2390; CHECK-NEXT:    ret
2391entry:
2392  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i64 %offset, i64 %vl)
2393  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
2394  ret <vscale x 1 x i8> %1
2395}
2396
2397define <vscale x 1 x i8> @test_vlsseg7_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2398; CHECK-LABEL: test_vlsseg7_mask_nxv1i8:
2399; CHECK:       # %bb.0: # %entry
2400; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
2401; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
2402; CHECK-NEXT:    vmv1r.v v8, v7
2403; CHECK-NEXT:    vmv1r.v v9, v7
2404; CHECK-NEXT:    vmv1r.v v10, v7
2405; CHECK-NEXT:    vmv1r.v v11, v7
2406; CHECK-NEXT:    vmv1r.v v12, v7
2407; CHECK-NEXT:    vmv1r.v v13, v7
2408; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
2409; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1, v0.t
2410; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
2411; CHECK-NEXT:    ret
2412entry:
2413  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i64 %offset, i64 %vl)
2414  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
2415  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
2416  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
2417  ret <vscale x 1 x i8> %3
2418}
2419
2420declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8*, i64, i64)
2421declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
2422
2423define <vscale x 1 x i8> @test_vlsseg8_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
2424; CHECK-LABEL: test_vlsseg8_nxv1i8:
2425; CHECK:       # %bb.0: # %entry
2426; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
2427; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
2428; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
2429; CHECK-NEXT:    ret
2430entry:
2431  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i64 %offset, i64 %vl)
2432  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
2433  ret <vscale x 1 x i8> %1
2434}
2435
2436define <vscale x 1 x i8> @test_vlsseg8_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2437; CHECK-LABEL: test_vlsseg8_mask_nxv1i8:
2438; CHECK:       # %bb.0: # %entry
2439; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
2440; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
2441; CHECK-NEXT:    vmv1r.v v8, v7
2442; CHECK-NEXT:    vmv1r.v v9, v7
2443; CHECK-NEXT:    vmv1r.v v10, v7
2444; CHECK-NEXT:    vmv1r.v v11, v7
2445; CHECK-NEXT:    vmv1r.v v12, v7
2446; CHECK-NEXT:    vmv1r.v v13, v7
2447; CHECK-NEXT:    vmv1r.v v14, v7
2448; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
2449; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1, v0.t
2450; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
2451; CHECK-NEXT:    ret
2452entry:
2453  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i64 %offset, i64 %vl)
2454  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
2455  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
2456  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
2457  ret <vscale x 1 x i8> %3
2458}
2459
2460declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8*, i64, i64)
2461declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
2462
2463define <vscale x 2 x i8> @test_vlsseg2_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
2464; CHECK-LABEL: test_vlsseg2_nxv2i8:
2465; CHECK:       # %bb.0: # %entry
2466; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
2467; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
2468; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
2469; CHECK-NEXT:    ret
2470entry:
2471  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i64 %offset, i64 %vl)
2472  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2473  ret <vscale x 2 x i8> %1
2474}
2475
2476define <vscale x 2 x i8> @test_vlsseg2_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2477; CHECK-LABEL: test_vlsseg2_mask_nxv2i8:
2478; CHECK:       # %bb.0: # %entry
2479; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
2480; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
2481; CHECK-NEXT:    vmv1r.v v8, v7
2482; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
2483; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1, v0.t
2484; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
2485; CHECK-NEXT:    ret
2486entry:
2487  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i64 %offset, i64 %vl)
2488  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2489  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
2490  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2491  ret <vscale x 2 x i8> %3
2492}
2493
2494declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8*, i64, i64)
2495declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
2496
2497define <vscale x 2 x i8> @test_vlsseg3_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
2498; CHECK-LABEL: test_vlsseg3_nxv2i8:
2499; CHECK:       # %bb.0: # %entry
2500; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
2501; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
2502; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
2503; CHECK-NEXT:    ret
2504entry:
2505  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i64 %offset, i64 %vl)
2506  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2507  ret <vscale x 2 x i8> %1
2508}
2509
2510define <vscale x 2 x i8> @test_vlsseg3_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2511; CHECK-LABEL: test_vlsseg3_mask_nxv2i8:
2512; CHECK:       # %bb.0: # %entry
2513; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
2514; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
2515; CHECK-NEXT:    vmv1r.v v8, v7
2516; CHECK-NEXT:    vmv1r.v v9, v7
2517; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
2518; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1, v0.t
2519; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
2520; CHECK-NEXT:    ret
2521entry:
2522  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i64 %offset, i64 %vl)
2523  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2524  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
2525  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2526  ret <vscale x 2 x i8> %3
2527}
2528
2529declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8*, i64, i64)
2530declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
2531
2532define <vscale x 2 x i8> @test_vlsseg4_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
2533; CHECK-LABEL: test_vlsseg4_nxv2i8:
2534; CHECK:       # %bb.0: # %entry
2535; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
2536; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
2537; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
2538; CHECK-NEXT:    ret
2539entry:
2540  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i64 %offset, i64 %vl)
2541  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2542  ret <vscale x 2 x i8> %1
2543}
2544
2545define <vscale x 2 x i8> @test_vlsseg4_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2546; CHECK-LABEL: test_vlsseg4_mask_nxv2i8:
2547; CHECK:       # %bb.0: # %entry
2548; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
2549; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
2550; CHECK-NEXT:    vmv1r.v v8, v7
2551; CHECK-NEXT:    vmv1r.v v9, v7
2552; CHECK-NEXT:    vmv1r.v v10, v7
2553; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
2554; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1, v0.t
2555; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
2556; CHECK-NEXT:    ret
2557entry:
2558  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i64 %offset, i64 %vl)
2559  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2560  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
2561  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2562  ret <vscale x 2 x i8> %3
2563}
2564
2565declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8*, i64, i64)
2566declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
2567
2568define <vscale x 2 x i8> @test_vlsseg5_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
2569; CHECK-LABEL: test_vlsseg5_nxv2i8:
2570; CHECK:       # %bb.0: # %entry
2571; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
2572; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
2573; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
2574; CHECK-NEXT:    ret
2575entry:
2576  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i64 %offset, i64 %vl)
2577  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2578  ret <vscale x 2 x i8> %1
2579}
2580
2581define <vscale x 2 x i8> @test_vlsseg5_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2582; CHECK-LABEL: test_vlsseg5_mask_nxv2i8:
2583; CHECK:       # %bb.0: # %entry
2584; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
2585; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
2586; CHECK-NEXT:    vmv1r.v v8, v7
2587; CHECK-NEXT:    vmv1r.v v9, v7
2588; CHECK-NEXT:    vmv1r.v v10, v7
2589; CHECK-NEXT:    vmv1r.v v11, v7
2590; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
2591; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1, v0.t
2592; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
2593; CHECK-NEXT:    ret
2594entry:
2595  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i64 %offset, i64 %vl)
2596  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2597  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
2598  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2599  ret <vscale x 2 x i8> %3
2600}
2601
2602declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8*, i64, i64)
2603declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
2604
2605define <vscale x 2 x i8> @test_vlsseg6_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
2606; CHECK-LABEL: test_vlsseg6_nxv2i8:
2607; CHECK:       # %bb.0: # %entry
2608; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
2609; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
2610; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
2611; CHECK-NEXT:    ret
2612entry:
2613  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i64 %offset, i64 %vl)
2614  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2615  ret <vscale x 2 x i8> %1
2616}
2617
2618define <vscale x 2 x i8> @test_vlsseg6_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2619; CHECK-LABEL: test_vlsseg6_mask_nxv2i8:
2620; CHECK:       # %bb.0: # %entry
2621; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
2622; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
2623; CHECK-NEXT:    vmv1r.v v8, v7
2624; CHECK-NEXT:    vmv1r.v v9, v7
2625; CHECK-NEXT:    vmv1r.v v10, v7
2626; CHECK-NEXT:    vmv1r.v v11, v7
2627; CHECK-NEXT:    vmv1r.v v12, v7
2628; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
2629; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1, v0.t
2630; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
2631; CHECK-NEXT:    ret
2632entry:
2633  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i64 %offset, i64 %vl)
2634  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2635  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
2636  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2637  ret <vscale x 2 x i8> %3
2638}
2639
2640declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8*, i64, i64)
2641declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
2642
2643define <vscale x 2 x i8> @test_vlsseg7_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
2644; CHECK-LABEL: test_vlsseg7_nxv2i8:
2645; CHECK:       # %bb.0: # %entry
2646; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
2647; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
2648; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
2649; CHECK-NEXT:    ret
2650entry:
2651  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i64 %offset, i64 %vl)
2652  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2653  ret <vscale x 2 x i8> %1
2654}
2655
2656define <vscale x 2 x i8> @test_vlsseg7_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2657; CHECK-LABEL: test_vlsseg7_mask_nxv2i8:
2658; CHECK:       # %bb.0: # %entry
2659; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
2660; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
2661; CHECK-NEXT:    vmv1r.v v8, v7
2662; CHECK-NEXT:    vmv1r.v v9, v7
2663; CHECK-NEXT:    vmv1r.v v10, v7
2664; CHECK-NEXT:    vmv1r.v v11, v7
2665; CHECK-NEXT:    vmv1r.v v12, v7
2666; CHECK-NEXT:    vmv1r.v v13, v7
2667; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
2668; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1, v0.t
2669; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
2670; CHECK-NEXT:    ret
2671entry:
2672  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i64 %offset, i64 %vl)
2673  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2674  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
2675  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2676  ret <vscale x 2 x i8> %3
2677}
2678
2679declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8*, i64, i64)
2680declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
2681
2682define <vscale x 2 x i8> @test_vlsseg8_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
2683; CHECK-LABEL: test_vlsseg8_nxv2i8:
2684; CHECK:       # %bb.0: # %entry
2685; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
2686; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
2687; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
2688; CHECK-NEXT:    ret
2689entry:
2690  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i64 %offset, i64 %vl)
2691  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2692  ret <vscale x 2 x i8> %1
2693}
2694
2695define <vscale x 2 x i8> @test_vlsseg8_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2696; CHECK-LABEL: test_vlsseg8_mask_nxv2i8:
2697; CHECK:       # %bb.0: # %entry
2698; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
2699; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
2700; CHECK-NEXT:    vmv1r.v v8, v7
2701; CHECK-NEXT:    vmv1r.v v9, v7
2702; CHECK-NEXT:    vmv1r.v v10, v7
2703; CHECK-NEXT:    vmv1r.v v11, v7
2704; CHECK-NEXT:    vmv1r.v v12, v7
2705; CHECK-NEXT:    vmv1r.v v13, v7
2706; CHECK-NEXT:    vmv1r.v v14, v7
2707; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
2708; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1, v0.t
2709; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
2710; CHECK-NEXT:    ret
2711entry:
2712  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i64 %offset, i64 %vl)
2713  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2714  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
2715  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2716  ret <vscale x 2 x i8> %3
2717}
2718
2719declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32*, i64, i64)
2720declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, i64, <vscale x 8 x i1>, i64)
2721
2722define <vscale x 8 x i32> @test_vlsseg2_nxv8i32(i32* %base, i64 %offset, i64 %vl) {
2723; CHECK-LABEL: test_vlsseg2_nxv8i32:
2724; CHECK:       # %bb.0: # %entry
2725; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
2726; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1
2727; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
2728; CHECK-NEXT:    ret
2729entry:
2730  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i64 %offset, i64 %vl)
2731  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
2732  ret <vscale x 8 x i32> %1
2733}
2734
2735define <vscale x 8 x i32> @test_vlsseg2_mask_nxv8i32(i32* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
2736; CHECK-LABEL: test_vlsseg2_mask_nxv8i32:
2737; CHECK:       # %bb.0: # %entry
2738; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
2739; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1
2740; CHECK-NEXT:    vmv4r.v v8, v4
2741; CHECK-NEXT:    vsetvli zero, zero, e32, m4, tu, mu
2742; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1, v0.t
2743; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
2744; CHECK-NEXT:    ret
2745entry:
2746  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i64 %offset, i64 %vl)
2747  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
2748  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.mask.nxv8i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
2749  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
2750  ret <vscale x 8 x i32> %3
2751}
2752
2753declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8*, i64, i64)
2754declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, i64, <vscale x 32 x i1>, i64)
2755
2756define <vscale x 32 x i8> @test_vlsseg2_nxv32i8(i8* %base, i64 %offset, i64 %vl) {
2757; CHECK-LABEL: test_vlsseg2_nxv32i8:
2758; CHECK:       # %bb.0: # %entry
2759; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
2760; CHECK-NEXT:    vlsseg2e8.v v4, (a0), a1
2761; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
2762; CHECK-NEXT:    ret
2763entry:
2764  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i64 %offset, i64 %vl)
2765  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
2766  ret <vscale x 32 x i8> %1
2767}
2768
2769define <vscale x 32 x i8> @test_vlsseg2_mask_nxv32i8(i8* %base, i64 %offset, i64 %vl, <vscale x 32 x i1> %mask) {
2770; CHECK-LABEL: test_vlsseg2_mask_nxv32i8:
2771; CHECK:       # %bb.0: # %entry
2772; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
2773; CHECK-NEXT:    vlsseg2e8.v v4, (a0), a1
2774; CHECK-NEXT:    vmv4r.v v8, v4
2775; CHECK-NEXT:    vsetvli zero, zero, e8, m4, tu, mu
2776; CHECK-NEXT:    vlsseg2e8.v v4, (a0), a1, v0.t
2777; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
2778; CHECK-NEXT:    ret
2779entry:
2780  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i64 %offset, i64 %vl)
2781  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
2782  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.mask.nxv32i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, i64 %offset, <vscale x 32 x i1> %mask, i64 %vl)
2783  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
2784  ret <vscale x 32 x i8> %3
2785}
2786
2787declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16*, i64, i64)
2788declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
2789
2790define <vscale x 2 x i16> @test_vlsseg2_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
2791; CHECK-LABEL: test_vlsseg2_nxv2i16:
2792; CHECK:       # %bb.0: # %entry
2793; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
2794; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
2795; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
2796; CHECK-NEXT:    ret
2797entry:
2798  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i64 %offset, i64 %vl)
2799  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
2800  ret <vscale x 2 x i16> %1
2801}
2802
2803define <vscale x 2 x i16> @test_vlsseg2_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2804; CHECK-LABEL: test_vlsseg2_mask_nxv2i16:
2805; CHECK:       # %bb.0: # %entry
2806; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
2807; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
2808; CHECK-NEXT:    vmv1r.v v8, v7
2809; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
2810; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
2811; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
2812; CHECK-NEXT:    ret
2813entry:
2814  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i64 %offset, i64 %vl)
2815  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
2816  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
2817  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
2818  ret <vscale x 2 x i16> %3
2819}
2820
2821declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16*, i64, i64)
2822declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
2823
2824define <vscale x 2 x i16> @test_vlsseg3_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
2825; CHECK-LABEL: test_vlsseg3_nxv2i16:
2826; CHECK:       # %bb.0: # %entry
2827; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
2828; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
2829; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
2830; CHECK-NEXT:    ret
2831entry:
2832  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i64 %offset, i64 %vl)
2833  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
2834  ret <vscale x 2 x i16> %1
2835}
2836
2837define <vscale x 2 x i16> @test_vlsseg3_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2838; CHECK-LABEL: test_vlsseg3_mask_nxv2i16:
2839; CHECK:       # %bb.0: # %entry
2840; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
2841; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
2842; CHECK-NEXT:    vmv1r.v v8, v7
2843; CHECK-NEXT:    vmv1r.v v9, v7
2844; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
2845; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
2846; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
2847; CHECK-NEXT:    ret
2848entry:
2849  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i64 %offset, i64 %vl)
2850  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
2851  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
2852  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
2853  ret <vscale x 2 x i16> %3
2854}
2855
2856declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16*, i64, i64)
2857declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
2858
2859define <vscale x 2 x i16> @test_vlsseg4_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
2860; CHECK-LABEL: test_vlsseg4_nxv2i16:
2861; CHECK:       # %bb.0: # %entry
2862; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
2863; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
2864; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
2865; CHECK-NEXT:    ret
2866entry:
2867  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i64 %offset, i64 %vl)
2868  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
2869  ret <vscale x 2 x i16> %1
2870}
2871
2872define <vscale x 2 x i16> @test_vlsseg4_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2873; CHECK-LABEL: test_vlsseg4_mask_nxv2i16:
2874; CHECK:       # %bb.0: # %entry
2875; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
2876; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
2877; CHECK-NEXT:    vmv1r.v v8, v7
2878; CHECK-NEXT:    vmv1r.v v9, v7
2879; CHECK-NEXT:    vmv1r.v v10, v7
2880; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
2881; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
2882; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
2883; CHECK-NEXT:    ret
2884entry:
2885  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i64 %offset, i64 %vl)
2886  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
2887  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
2888  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
2889  ret <vscale x 2 x i16> %3
2890}
2891
2892declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16*, i64, i64)
2893declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
2894
2895define <vscale x 2 x i16> @test_vlsseg5_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
2896; CHECK-LABEL: test_vlsseg5_nxv2i16:
2897; CHECK:       # %bb.0: # %entry
2898; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
2899; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
2900; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
2901; CHECK-NEXT:    ret
2902entry:
2903  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i64 %offset, i64 %vl)
2904  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
2905  ret <vscale x 2 x i16> %1
2906}
2907
2908define <vscale x 2 x i16> @test_vlsseg5_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2909; CHECK-LABEL: test_vlsseg5_mask_nxv2i16:
2910; CHECK:       # %bb.0: # %entry
2911; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
2912; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
2913; CHECK-NEXT:    vmv1r.v v8, v7
2914; CHECK-NEXT:    vmv1r.v v9, v7
2915; CHECK-NEXT:    vmv1r.v v10, v7
2916; CHECK-NEXT:    vmv1r.v v11, v7
2917; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
2918; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
2919; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
2920; CHECK-NEXT:    ret
2921entry:
2922  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i64 %offset, i64 %vl)
2923  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
2924  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
2925  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
2926  ret <vscale x 2 x i16> %3
2927}
2928
2929declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16*, i64, i64)
2930declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
2931
2932define <vscale x 2 x i16> @test_vlsseg6_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
2933; CHECK-LABEL: test_vlsseg6_nxv2i16:
2934; CHECK:       # %bb.0: # %entry
2935; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
2936; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
2937; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
2938; CHECK-NEXT:    ret
2939entry:
2940  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i64 %offset, i64 %vl)
2941  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
2942  ret <vscale x 2 x i16> %1
2943}
2944
2945define <vscale x 2 x i16> @test_vlsseg6_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2946; CHECK-LABEL: test_vlsseg6_mask_nxv2i16:
2947; CHECK:       # %bb.0: # %entry
2948; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
2949; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
2950; CHECK-NEXT:    vmv1r.v v8, v7
2951; CHECK-NEXT:    vmv1r.v v9, v7
2952; CHECK-NEXT:    vmv1r.v v10, v7
2953; CHECK-NEXT:    vmv1r.v v11, v7
2954; CHECK-NEXT:    vmv1r.v v12, v7
2955; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
2956; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
2957; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
2958; CHECK-NEXT:    ret
2959entry:
2960  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i64 %offset, i64 %vl)
2961  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
2962  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
2963  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
2964  ret <vscale x 2 x i16> %3
2965}
2966
2967declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16*, i64, i64)
2968declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
2969
2970define <vscale x 2 x i16> @test_vlsseg7_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
2971; CHECK-LABEL: test_vlsseg7_nxv2i16:
2972; CHECK:       # %bb.0: # %entry
2973; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
2974; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
2975; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
2976; CHECK-NEXT:    ret
2977entry:
2978  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i64 %offset, i64 %vl)
2979  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
2980  ret <vscale x 2 x i16> %1
2981}
2982
2983define <vscale x 2 x i16> @test_vlsseg7_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2984; CHECK-LABEL: test_vlsseg7_mask_nxv2i16:
2985; CHECK:       # %bb.0: # %entry
2986; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
2987; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
2988; CHECK-NEXT:    vmv1r.v v8, v7
2989; CHECK-NEXT:    vmv1r.v v9, v7
2990; CHECK-NEXT:    vmv1r.v v10, v7
2991; CHECK-NEXT:    vmv1r.v v11, v7
2992; CHECK-NEXT:    vmv1r.v v12, v7
2993; CHECK-NEXT:    vmv1r.v v13, v7
2994; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
2995; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
2996; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
2997; CHECK-NEXT:    ret
2998entry:
2999  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i64 %offset, i64 %vl)
3000  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
3001  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
3002  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
3003  ret <vscale x 2 x i16> %3
3004}
3005
3006declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16*, i64, i64)
3007declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
3008
3009define <vscale x 2 x i16> @test_vlsseg8_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
3010; CHECK-LABEL: test_vlsseg8_nxv2i16:
3011; CHECK:       # %bb.0: # %entry
3012; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
3013; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
3014; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
3015; CHECK-NEXT:    ret
3016entry:
3017  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i64 %offset, i64 %vl)
3018  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
3019  ret <vscale x 2 x i16> %1
3020}
3021
3022define <vscale x 2 x i16> @test_vlsseg8_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
3023; CHECK-LABEL: test_vlsseg8_mask_nxv2i16:
3024; CHECK:       # %bb.0: # %entry
3025; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
3026; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
3027; CHECK-NEXT:    vmv1r.v v8, v7
3028; CHECK-NEXT:    vmv1r.v v9, v7
3029; CHECK-NEXT:    vmv1r.v v10, v7
3030; CHECK-NEXT:    vmv1r.v v11, v7
3031; CHECK-NEXT:    vmv1r.v v12, v7
3032; CHECK-NEXT:    vmv1r.v v13, v7
3033; CHECK-NEXT:    vmv1r.v v14, v7
3034; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
3035; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
3036; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
3037; CHECK-NEXT:    ret
3038entry:
3039  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i64 %offset, i64 %vl)
3040  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
3041  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
3042  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
3043  ret <vscale x 2 x i16> %3
3044}
3045
3046declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(i64*, i64, i64)
3047declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, <vscale x 2 x i1>, i64)
3048
3049define <vscale x 2 x i64> @test_vlsseg2_nxv2i64(i64* %base, i64 %offset, i64 %vl) {
3050; CHECK-LABEL: test_vlsseg2_nxv2i64:
3051; CHECK:       # %bb.0: # %entry
3052; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
3053; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1
3054; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
3055; CHECK-NEXT:    ret
3056entry:
3057  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(i64* %base, i64 %offset, i64 %vl)
3058  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
3059  ret <vscale x 2 x i64> %1
3060}
3061
3062define <vscale x 2 x i64> @test_vlsseg2_mask_nxv2i64(i64* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
3063; CHECK-LABEL: test_vlsseg2_mask_nxv2i64:
3064; CHECK:       # %bb.0: # %entry
3065; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
3066; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1
3067; CHECK-NEXT:    vmv2r.v v8, v6
3068; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
3069; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1, v0.t
3070; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
3071; CHECK-NEXT:    ret
3072entry:
3073  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(i64* %base, i64 %offset, i64 %vl)
3074  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
3075  %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.mask.nxv2i64(<vscale x 2 x i64> %1,<vscale x 2 x i64> %1, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
3076  %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
3077  ret <vscale x 2 x i64> %3
3078}
3079
3080declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(i64*, i64, i64)
3081declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, <vscale x 2 x i1>, i64)
3082
3083define <vscale x 2 x i64> @test_vlsseg3_nxv2i64(i64* %base, i64 %offset, i64 %vl) {
3084; CHECK-LABEL: test_vlsseg3_nxv2i64:
3085; CHECK:       # %bb.0: # %entry
3086; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
3087; CHECK-NEXT:    vlsseg3e64.v v6, (a0), a1
3088; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
3089; CHECK-NEXT:    ret
3090entry:
3091  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(i64* %base, i64 %offset, i64 %vl)
3092  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
3093  ret <vscale x 2 x i64> %1
3094}
3095
3096define <vscale x 2 x i64> @test_vlsseg3_mask_nxv2i64(i64* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
3097; CHECK-LABEL: test_vlsseg3_mask_nxv2i64:
3098; CHECK:       # %bb.0: # %entry
3099; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
3100; CHECK-NEXT:    vlsseg3e64.v v6, (a0), a1
3101; CHECK-NEXT:    vmv2r.v v8, v6
3102; CHECK-NEXT:    vmv2r.v v10, v6
3103; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
3104; CHECK-NEXT:    vlsseg3e64.v v6, (a0), a1, v0.t
3105; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
3106; CHECK-NEXT:    ret
3107entry:
3108  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(i64* %base, i64 %offset, i64 %vl)
3109  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
3110  %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.mask.nxv2i64(<vscale x 2 x i64> %1,<vscale x 2 x i64> %1,<vscale x 2 x i64> %1, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
3111  %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
3112  ret <vscale x 2 x i64> %3
3113}
3114
3115declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(i64*, i64, i64)
3116declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, <vscale x 2 x i1>, i64)
3117
3118define <vscale x 2 x i64> @test_vlsseg4_nxv2i64(i64* %base, i64 %offset, i64 %vl) {
3119; CHECK-LABEL: test_vlsseg4_nxv2i64:
3120; CHECK:       # %bb.0: # %entry
3121; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
3122; CHECK-NEXT:    vlsseg4e64.v v6, (a0), a1
3123; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
3124; CHECK-NEXT:    ret
3125entry:
3126  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(i64* %base, i64 %offset, i64 %vl)
3127  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
3128  ret <vscale x 2 x i64> %1
3129}
3130
3131define <vscale x 2 x i64> @test_vlsseg4_mask_nxv2i64(i64* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
3132; CHECK-LABEL: test_vlsseg4_mask_nxv2i64:
3133; CHECK:       # %bb.0: # %entry
3134; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
3135; CHECK-NEXT:    vlsseg4e64.v v6, (a0), a1
3136; CHECK-NEXT:    vmv2r.v v8, v6
3137; CHECK-NEXT:    vmv2r.v v10, v6
3138; CHECK-NEXT:    vmv2r.v v12, v6
3139; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
3140; CHECK-NEXT:    vlsseg4e64.v v6, (a0), a1, v0.t
3141; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
3142; CHECK-NEXT:    ret
3143entry:
3144  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(i64* %base, i64 %offset, i64 %vl)
3145  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
3146  %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.mask.nxv2i64(<vscale x 2 x i64> %1,<vscale x 2 x i64> %1,<vscale x 2 x i64> %1,<vscale x 2 x i64> %1, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
3147  %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
3148  ret <vscale x 2 x i64> %3
3149}
3150
3151declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half*, i64, i64)
3152declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, half*, i64, <vscale x 16 x i1>, i64)
3153
3154define <vscale x 16 x half> @test_vlsseg2_nxv16f16(half* %base, i64 %offset, i64 %vl) {
3155; CHECK-LABEL: test_vlsseg2_nxv16f16:
3156; CHECK:       # %bb.0: # %entry
3157; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
3158; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
3159; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
3160; CHECK-NEXT:    ret
3161entry:
3162  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i64 %offset, i64 %vl)
3163  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
3164  ret <vscale x 16 x half> %1
3165}
3166
3167define <vscale x 16 x half> @test_vlsseg2_mask_nxv16f16(half* %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
3168; CHECK-LABEL: test_vlsseg2_mask_nxv16f16:
3169; CHECK:       # %bb.0: # %entry
3170; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
3171; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
3172; CHECK-NEXT:    vmv4r.v v8, v4
3173; CHECK-NEXT:    vsetvli zero, zero, e16, m4, tu, mu
3174; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1, v0.t
3175; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
3176; CHECK-NEXT:    ret
3177entry:
3178  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i64 %offset, i64 %vl)
3179  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
3180  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.mask.nxv16f16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
3181  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
3182  ret <vscale x 16 x half> %3
3183}
3184
3185declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double*, i64, i64)
3186declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, double*, i64, <vscale x 4 x i1>, i64)
3187
3188define <vscale x 4 x double> @test_vlsseg2_nxv4f64(double* %base, i64 %offset, i64 %vl) {
3189; CHECK-LABEL: test_vlsseg2_nxv4f64:
3190; CHECK:       # %bb.0: # %entry
3191; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
3192; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1
3193; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
3194; CHECK-NEXT:    ret
3195entry:
3196  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i64 %offset, i64 %vl)
3197  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
3198  ret <vscale x 4 x double> %1
3199}
3200
3201define <vscale x 4 x double> @test_vlsseg2_mask_nxv4f64(double* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
3202; CHECK-LABEL: test_vlsseg2_mask_nxv4f64:
3203; CHECK:       # %bb.0: # %entry
3204; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
3205; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1
3206; CHECK-NEXT:    vmv4r.v v8, v4
3207; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
3208; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1, v0.t
3209; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
3210; CHECK-NEXT:    ret
3211entry:
3212  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i64 %offset, i64 %vl)
3213  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
3214  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.mask.nxv4f64(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
3215  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
3216  ret <vscale x 4 x double> %3
3217}
3218
3219declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double*, i64, i64)
3220declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
3221
3222define <vscale x 1 x double> @test_vlsseg2_nxv1f64(double* %base, i64 %offset, i64 %vl) {
3223; CHECK-LABEL: test_vlsseg2_nxv1f64:
3224; CHECK:       # %bb.0: # %entry
3225; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
3226; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1
3227; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
3228; CHECK-NEXT:    ret
3229entry:
3230  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i64 %offset, i64 %vl)
3231  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
3232  ret <vscale x 1 x double> %1
3233}
3234
3235define <vscale x 1 x double> @test_vlsseg2_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3236; CHECK-LABEL: test_vlsseg2_mask_nxv1f64:
3237; CHECK:       # %bb.0: # %entry
3238; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
3239; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1
3240; CHECK-NEXT:    vmv1r.v v8, v7
3241; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
3242; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1, v0.t
3243; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
3244; CHECK-NEXT:    ret
3245entry:
3246  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i64 %offset, i64 %vl)
3247  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
3248  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
3249  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
3250  ret <vscale x 1 x double> %3
3251}
3252
3253declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double*, i64, i64)
3254declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
3255
3256define <vscale x 1 x double> @test_vlsseg3_nxv1f64(double* %base, i64 %offset, i64 %vl) {
3257; CHECK-LABEL: test_vlsseg3_nxv1f64:
3258; CHECK:       # %bb.0: # %entry
3259; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
3260; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1
3261; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
3262; CHECK-NEXT:    ret
3263entry:
3264  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i64 %offset, i64 %vl)
3265  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
3266  ret <vscale x 1 x double> %1
3267}
3268
3269define <vscale x 1 x double> @test_vlsseg3_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3270; CHECK-LABEL: test_vlsseg3_mask_nxv1f64:
3271; CHECK:       # %bb.0: # %entry
3272; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
3273; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1
3274; CHECK-NEXT:    vmv1r.v v8, v7
3275; CHECK-NEXT:    vmv1r.v v9, v7
3276; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
3277; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1, v0.t
3278; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
3279; CHECK-NEXT:    ret
3280entry:
3281  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i64 %offset, i64 %vl)
3282  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
3283  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
3284  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
3285  ret <vscale x 1 x double> %3
3286}
3287
3288declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double*, i64, i64)
3289declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
3290
3291define <vscale x 1 x double> @test_vlsseg4_nxv1f64(double* %base, i64 %offset, i64 %vl) {
3292; CHECK-LABEL: test_vlsseg4_nxv1f64:
3293; CHECK:       # %bb.0: # %entry
3294; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
3295; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1
3296; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
3297; CHECK-NEXT:    ret
3298entry:
3299  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i64 %offset, i64 %vl)
3300  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
3301  ret <vscale x 1 x double> %1
3302}
3303
3304define <vscale x 1 x double> @test_vlsseg4_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3305; CHECK-LABEL: test_vlsseg4_mask_nxv1f64:
3306; CHECK:       # %bb.0: # %entry
3307; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
3308; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1
3309; CHECK-NEXT:    vmv1r.v v8, v7
3310; CHECK-NEXT:    vmv1r.v v9, v7
3311; CHECK-NEXT:    vmv1r.v v10, v7
3312; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
3313; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1, v0.t
3314; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
3315; CHECK-NEXT:    ret
3316entry:
3317  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i64 %offset, i64 %vl)
3318  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
3319  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
3320  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
3321  ret <vscale x 1 x double> %3
3322}
3323
3324declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(double*, i64, i64)
3325declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
3326
3327define <vscale x 1 x double> @test_vlsseg5_nxv1f64(double* %base, i64 %offset, i64 %vl) {
3328; CHECK-LABEL: test_vlsseg5_nxv1f64:
3329; CHECK:       # %bb.0: # %entry
3330; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
3331; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1
3332; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
3333; CHECK-NEXT:    ret
3334entry:
3335  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i64 %offset, i64 %vl)
3336  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
3337  ret <vscale x 1 x double> %1
3338}
3339
3340define <vscale x 1 x double> @test_vlsseg5_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3341; CHECK-LABEL: test_vlsseg5_mask_nxv1f64:
3342; CHECK:       # %bb.0: # %entry
3343; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
3344; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1
3345; CHECK-NEXT:    vmv1r.v v8, v7
3346; CHECK-NEXT:    vmv1r.v v9, v7
3347; CHECK-NEXT:    vmv1r.v v10, v7
3348; CHECK-NEXT:    vmv1r.v v11, v7
3349; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
3350; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1, v0.t
3351; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
3352; CHECK-NEXT:    ret
3353entry:
3354  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i64 %offset, i64 %vl)
3355  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
3356  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
3357  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
3358  ret <vscale x 1 x double> %3
3359}
3360
3361declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(double*, i64, i64)
3362declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
3363
3364define <vscale x 1 x double> @test_vlsseg6_nxv1f64(double* %base, i64 %offset, i64 %vl) {
3365; CHECK-LABEL: test_vlsseg6_nxv1f64:
3366; CHECK:       # %bb.0: # %entry
3367; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
3368; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1
3369; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
3370; CHECK-NEXT:    ret
3371entry:
3372  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i64 %offset, i64 %vl)
3373  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
3374  ret <vscale x 1 x double> %1
3375}
3376
3377define <vscale x 1 x double> @test_vlsseg6_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3378; CHECK-LABEL: test_vlsseg6_mask_nxv1f64:
3379; CHECK:       # %bb.0: # %entry
3380; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
3381; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1
3382; CHECK-NEXT:    vmv1r.v v8, v7
3383; CHECK-NEXT:    vmv1r.v v9, v7
3384; CHECK-NEXT:    vmv1r.v v10, v7
3385; CHECK-NEXT:    vmv1r.v v11, v7
3386; CHECK-NEXT:    vmv1r.v v12, v7
3387; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
3388; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1, v0.t
3389; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
3390; CHECK-NEXT:    ret
3391entry:
3392  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i64 %offset, i64 %vl)
3393  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
3394  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
3395  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
3396  ret <vscale x 1 x double> %3
3397}
3398
3399declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(double*, i64, i64)
3400declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
3401
3402define <vscale x 1 x double> @test_vlsseg7_nxv1f64(double* %base, i64 %offset, i64 %vl) {
3403; CHECK-LABEL: test_vlsseg7_nxv1f64:
3404; CHECK:       # %bb.0: # %entry
3405; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
3406; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1
3407; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
3408; CHECK-NEXT:    ret
3409entry:
3410  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i64 %offset, i64 %vl)
3411  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
3412  ret <vscale x 1 x double> %1
3413}
3414
3415define <vscale x 1 x double> @test_vlsseg7_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3416; CHECK-LABEL: test_vlsseg7_mask_nxv1f64:
3417; CHECK:       # %bb.0: # %entry
3418; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
3419; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1
3420; CHECK-NEXT:    vmv1r.v v8, v7
3421; CHECK-NEXT:    vmv1r.v v9, v7
3422; CHECK-NEXT:    vmv1r.v v10, v7
3423; CHECK-NEXT:    vmv1r.v v11, v7
3424; CHECK-NEXT:    vmv1r.v v12, v7
3425; CHECK-NEXT:    vmv1r.v v13, v7
3426; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
3427; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1, v0.t
3428; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
3429; CHECK-NEXT:    ret
3430entry:
3431  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i64 %offset, i64 %vl)
3432  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
3433  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
3434  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
3435  ret <vscale x 1 x double> %3
3436}
3437
3438declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(double*, i64, i64)
3439declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
3440
3441define <vscale x 1 x double> @test_vlsseg8_nxv1f64(double* %base, i64 %offset, i64 %vl) {
3442; CHECK-LABEL: test_vlsseg8_nxv1f64:
3443; CHECK:       # %bb.0: # %entry
3444; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
3445; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1
3446; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
3447; CHECK-NEXT:    ret
3448entry:
3449  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i64 %offset, i64 %vl)
3450  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
3451  ret <vscale x 1 x double> %1
3452}
3453
3454define <vscale x 1 x double> @test_vlsseg8_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3455; CHECK-LABEL: test_vlsseg8_mask_nxv1f64:
3456; CHECK:       # %bb.0: # %entry
3457; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
3458; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1
3459; CHECK-NEXT:    vmv1r.v v8, v7
3460; CHECK-NEXT:    vmv1r.v v9, v7
3461; CHECK-NEXT:    vmv1r.v v10, v7
3462; CHECK-NEXT:    vmv1r.v v11, v7
3463; CHECK-NEXT:    vmv1r.v v12, v7
3464; CHECK-NEXT:    vmv1r.v v13, v7
3465; CHECK-NEXT:    vmv1r.v v14, v7
3466; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
3467; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1, v0.t
3468; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
3469; CHECK-NEXT:    ret
3470entry:
3471  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i64 %offset, i64 %vl)
3472  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
3473  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
3474  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
3475  ret <vscale x 1 x double> %3
3476}
3477
3478declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(float*, i64, i64)
3479declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
3480
3481define <vscale x 2 x float> @test_vlsseg2_nxv2f32(float* %base, i64 %offset, i64 %vl) {
3482; CHECK-LABEL: test_vlsseg2_nxv2f32:
3483; CHECK:       # %bb.0: # %entry
3484; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
3485; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
3486; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
3487; CHECK-NEXT:    ret
3488entry:
3489  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i64 %offset, i64 %vl)
3490  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
3491  ret <vscale x 2 x float> %1
3492}
3493
3494define <vscale x 2 x float> @test_vlsseg2_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
3495; CHECK-LABEL: test_vlsseg2_mask_nxv2f32:
3496; CHECK:       # %bb.0: # %entry
3497; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
3498; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
3499; CHECK-NEXT:    vmv1r.v v8, v7
3500; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
3501; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1, v0.t
3502; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
3503; CHECK-NEXT:    ret
3504entry:
3505  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i64 %offset, i64 %vl)
3506  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
3507  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
3508  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
3509  ret <vscale x 2 x float> %3
3510}
3511
3512declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(float*, i64, i64)
3513declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
3514
3515define <vscale x 2 x float> @test_vlsseg3_nxv2f32(float* %base, i64 %offset, i64 %vl) {
3516; CHECK-LABEL: test_vlsseg3_nxv2f32:
3517; CHECK:       # %bb.0: # %entry
3518; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
3519; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
3520; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
3521; CHECK-NEXT:    ret
3522entry:
3523  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i64 %offset, i64 %vl)
3524  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
3525  ret <vscale x 2 x float> %1
3526}
3527
3528define <vscale x 2 x float> @test_vlsseg3_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
3529; CHECK-LABEL: test_vlsseg3_mask_nxv2f32:
3530; CHECK:       # %bb.0: # %entry
3531; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
3532; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
3533; CHECK-NEXT:    vmv1r.v v8, v7
3534; CHECK-NEXT:    vmv1r.v v9, v7
3535; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
3536; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1, v0.t
3537; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
3538; CHECK-NEXT:    ret
3539entry:
3540  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i64 %offset, i64 %vl)
3541  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
3542  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
3543  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
3544  ret <vscale x 2 x float> %3
3545}
3546
3547declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(float*, i64, i64)
3548declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
3549
3550define <vscale x 2 x float> @test_vlsseg4_nxv2f32(float* %base, i64 %offset, i64 %vl) {
3551; CHECK-LABEL: test_vlsseg4_nxv2f32:
3552; CHECK:       # %bb.0: # %entry
3553; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
3554; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
3555; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
3556; CHECK-NEXT:    ret
3557entry:
3558  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i64 %offset, i64 %vl)
3559  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
3560  ret <vscale x 2 x float> %1
3561}
3562
3563define <vscale x 2 x float> @test_vlsseg4_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
3564; CHECK-LABEL: test_vlsseg4_mask_nxv2f32:
3565; CHECK:       # %bb.0: # %entry
3566; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
3567; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
3568; CHECK-NEXT:    vmv1r.v v8, v7
3569; CHECK-NEXT:    vmv1r.v v9, v7
3570; CHECK-NEXT:    vmv1r.v v10, v7
3571; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
3572; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1, v0.t
3573; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
3574; CHECK-NEXT:    ret
3575entry:
3576  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i64 %offset, i64 %vl)
3577  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
3578  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
3579  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
3580  ret <vscale x 2 x float> %3
3581}
3582
3583declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(float*, i64, i64)
3584declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
3585
3586define <vscale x 2 x float> @test_vlsseg5_nxv2f32(float* %base, i64 %offset, i64 %vl) {
3587; CHECK-LABEL: test_vlsseg5_nxv2f32:
3588; CHECK:       # %bb.0: # %entry
3589; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
3590; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
3591; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
3592; CHECK-NEXT:    ret
3593entry:
3594  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i64 %offset, i64 %vl)
3595  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
3596  ret <vscale x 2 x float> %1
3597}
3598
3599define <vscale x 2 x float> @test_vlsseg5_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
3600; CHECK-LABEL: test_vlsseg5_mask_nxv2f32:
3601; CHECK:       # %bb.0: # %entry
3602; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
3603; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
3604; CHECK-NEXT:    vmv1r.v v8, v7
3605; CHECK-NEXT:    vmv1r.v v9, v7
3606; CHECK-NEXT:    vmv1r.v v10, v7
3607; CHECK-NEXT:    vmv1r.v v11, v7
3608; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
3609; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1, v0.t
3610; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
3611; CHECK-NEXT:    ret
3612entry:
3613  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i64 %offset, i64 %vl)
3614  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
3615  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
3616  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
3617  ret <vscale x 2 x float> %3
3618}
3619
3620declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(float*, i64, i64)
3621declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
3622
3623define <vscale x 2 x float> @test_vlsseg6_nxv2f32(float* %base, i64 %offset, i64 %vl) {
3624; CHECK-LABEL: test_vlsseg6_nxv2f32:
3625; CHECK:       # %bb.0: # %entry
3626; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
3627; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
3628; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
3629; CHECK-NEXT:    ret
3630entry:
3631  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i64 %offset, i64 %vl)
3632  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
3633  ret <vscale x 2 x float> %1
3634}
3635
3636define <vscale x 2 x float> @test_vlsseg6_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
3637; CHECK-LABEL: test_vlsseg6_mask_nxv2f32:
3638; CHECK:       # %bb.0: # %entry
3639; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
3640; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
3641; CHECK-NEXT:    vmv1r.v v8, v7
3642; CHECK-NEXT:    vmv1r.v v9, v7
3643; CHECK-NEXT:    vmv1r.v v10, v7
3644; CHECK-NEXT:    vmv1r.v v11, v7
3645; CHECK-NEXT:    vmv1r.v v12, v7
3646; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
3647; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1, v0.t
3648; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
3649; CHECK-NEXT:    ret
3650entry:
3651  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i64 %offset, i64 %vl)
3652  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
3653  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
3654  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
3655  ret <vscale x 2 x float> %3
3656}
3657
3658declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(float*, i64, i64)
3659declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
3660
3661define <vscale x 2 x float> @test_vlsseg7_nxv2f32(float* %base, i64 %offset, i64 %vl) {
3662; CHECK-LABEL: test_vlsseg7_nxv2f32:
3663; CHECK:       # %bb.0: # %entry
3664; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
3665; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
3666; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
3667; CHECK-NEXT:    ret
3668entry:
3669  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i64 %offset, i64 %vl)
3670  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
3671  ret <vscale x 2 x float> %1
3672}
3673
3674define <vscale x 2 x float> @test_vlsseg7_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
3675; CHECK-LABEL: test_vlsseg7_mask_nxv2f32:
3676; CHECK:       # %bb.0: # %entry
3677; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
3678; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
3679; CHECK-NEXT:    vmv1r.v v8, v7
3680; CHECK-NEXT:    vmv1r.v v9, v7
3681; CHECK-NEXT:    vmv1r.v v10, v7
3682; CHECK-NEXT:    vmv1r.v v11, v7
3683; CHECK-NEXT:    vmv1r.v v12, v7
3684; CHECK-NEXT:    vmv1r.v v13, v7
3685; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
3686; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1, v0.t
3687; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
3688; CHECK-NEXT:    ret
3689entry:
3690  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i64 %offset, i64 %vl)
3691  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
3692  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
3693  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
3694  ret <vscale x 2 x float> %3
3695}
3696
3697declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(float*, i64, i64)
3698declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
3699
3700define <vscale x 2 x float> @test_vlsseg8_nxv2f32(float* %base, i64 %offset, i64 %vl) {
3701; CHECK-LABEL: test_vlsseg8_nxv2f32:
3702; CHECK:       # %bb.0: # %entry
3703; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
3704; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
3705; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
3706; CHECK-NEXT:    ret
3707entry:
3708  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i64 %offset, i64 %vl)
3709  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
3710  ret <vscale x 2 x float> %1
3711}
3712
3713define <vscale x 2 x float> @test_vlsseg8_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
3714; CHECK-LABEL: test_vlsseg8_mask_nxv2f32:
3715; CHECK:       # %bb.0: # %entry
3716; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
3717; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
3718; CHECK-NEXT:    vmv1r.v v8, v7
3719; CHECK-NEXT:    vmv1r.v v9, v7
3720; CHECK-NEXT:    vmv1r.v v10, v7
3721; CHECK-NEXT:    vmv1r.v v11, v7
3722; CHECK-NEXT:    vmv1r.v v12, v7
3723; CHECK-NEXT:    vmv1r.v v13, v7
3724; CHECK-NEXT:    vmv1r.v v14, v7
3725; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
3726; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1, v0.t
3727; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
3728; CHECK-NEXT:    ret
3729entry:
3730  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i64 %offset, i64 %vl)
3731  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
3732  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
3733  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
3734  ret <vscale x 2 x float> %3
3735}
3736
3737declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(half*, i64, i64)
3738declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
3739
3740define <vscale x 1 x half> @test_vlsseg2_nxv1f16(half* %base, i64 %offset, i64 %vl) {
3741; CHECK-LABEL: test_vlsseg2_nxv1f16:
3742; CHECK:       # %bb.0: # %entry
3743; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
3744; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
3745; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
3746; CHECK-NEXT:    ret
3747entry:
3748  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i64 %offset, i64 %vl)
3749  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
3750  ret <vscale x 1 x half> %1
3751}
3752
3753define <vscale x 1 x half> @test_vlsseg2_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3754; CHECK-LABEL: test_vlsseg2_mask_nxv1f16:
3755; CHECK:       # %bb.0: # %entry
3756; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
3757; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
3758; CHECK-NEXT:    vmv1r.v v8, v7
3759; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
3760; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
3761; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
3762; CHECK-NEXT:    ret
3763entry:
3764  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i64 %offset, i64 %vl)
3765  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
3766  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
3767  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
3768  ret <vscale x 1 x half> %3
3769}
3770
3771declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(half*, i64, i64)
3772declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
3773
3774define <vscale x 1 x half> @test_vlsseg3_nxv1f16(half* %base, i64 %offset, i64 %vl) {
3775; CHECK-LABEL: test_vlsseg3_nxv1f16:
3776; CHECK:       # %bb.0: # %entry
3777; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
3778; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
3779; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
3780; CHECK-NEXT:    ret
3781entry:
3782  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i64 %offset, i64 %vl)
3783  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
3784  ret <vscale x 1 x half> %1
3785}
3786
3787define <vscale x 1 x half> @test_vlsseg3_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3788; CHECK-LABEL: test_vlsseg3_mask_nxv1f16:
3789; CHECK:       # %bb.0: # %entry
3790; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
3791; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
3792; CHECK-NEXT:    vmv1r.v v8, v7
3793; CHECK-NEXT:    vmv1r.v v9, v7
3794; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
3795; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
3796; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
3797; CHECK-NEXT:    ret
3798entry:
3799  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i64 %offset, i64 %vl)
3800  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
3801  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
3802  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
3803  ret <vscale x 1 x half> %3
3804}
3805
3806declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(half*, i64, i64)
3807declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
3808
3809define <vscale x 1 x half> @test_vlsseg4_nxv1f16(half* %base, i64 %offset, i64 %vl) {
3810; CHECK-LABEL: test_vlsseg4_nxv1f16:
3811; CHECK:       # %bb.0: # %entry
3812; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
3813; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
3814; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
3815; CHECK-NEXT:    ret
3816entry:
3817  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i64 %offset, i64 %vl)
3818  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
3819  ret <vscale x 1 x half> %1
3820}
3821
3822define <vscale x 1 x half> @test_vlsseg4_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3823; CHECK-LABEL: test_vlsseg4_mask_nxv1f16:
3824; CHECK:       # %bb.0: # %entry
3825; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
3826; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
3827; CHECK-NEXT:    vmv1r.v v8, v7
3828; CHECK-NEXT:    vmv1r.v v9, v7
3829; CHECK-NEXT:    vmv1r.v v10, v7
3830; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
3831; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
3832; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
3833; CHECK-NEXT:    ret
3834entry:
3835  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i64 %offset, i64 %vl)
3836  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
3837  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
3838  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
3839  ret <vscale x 1 x half> %3
3840}
3841
3842declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(half*, i64, i64)
3843declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
3844
3845define <vscale x 1 x half> @test_vlsseg5_nxv1f16(half* %base, i64 %offset, i64 %vl) {
3846; CHECK-LABEL: test_vlsseg5_nxv1f16:
3847; CHECK:       # %bb.0: # %entry
3848; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
3849; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
3850; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
3851; CHECK-NEXT:    ret
3852entry:
3853  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i64 %offset, i64 %vl)
3854  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
3855  ret <vscale x 1 x half> %1
3856}
3857
3858define <vscale x 1 x half> @test_vlsseg5_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3859; CHECK-LABEL: test_vlsseg5_mask_nxv1f16:
3860; CHECK:       # %bb.0: # %entry
3861; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
3862; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
3863; CHECK-NEXT:    vmv1r.v v8, v7
3864; CHECK-NEXT:    vmv1r.v v9, v7
3865; CHECK-NEXT:    vmv1r.v v10, v7
3866; CHECK-NEXT:    vmv1r.v v11, v7
3867; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
3868; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
3869; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
3870; CHECK-NEXT:    ret
3871entry:
3872  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i64 %offset, i64 %vl)
3873  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
3874  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
3875  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
3876  ret <vscale x 1 x half> %3
3877}
3878
3879declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(half*, i64, i64)
3880declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
3881
3882define <vscale x 1 x half> @test_vlsseg6_nxv1f16(half* %base, i64 %offset, i64 %vl) {
3883; CHECK-LABEL: test_vlsseg6_nxv1f16:
3884; CHECK:       # %bb.0: # %entry
3885; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
3886; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
3887; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
3888; CHECK-NEXT:    ret
3889entry:
3890  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i64 %offset, i64 %vl)
3891  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
3892  ret <vscale x 1 x half> %1
3893}
3894
3895define <vscale x 1 x half> @test_vlsseg6_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3896; CHECK-LABEL: test_vlsseg6_mask_nxv1f16:
3897; CHECK:       # %bb.0: # %entry
3898; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
3899; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
3900; CHECK-NEXT:    vmv1r.v v8, v7
3901; CHECK-NEXT:    vmv1r.v v9, v7
3902; CHECK-NEXT:    vmv1r.v v10, v7
3903; CHECK-NEXT:    vmv1r.v v11, v7
3904; CHECK-NEXT:    vmv1r.v v12, v7
3905; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
3906; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
3907; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
3908; CHECK-NEXT:    ret
3909entry:
3910  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i64 %offset, i64 %vl)
3911  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
3912  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
3913  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
3914  ret <vscale x 1 x half> %3
3915}
3916
3917declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(half*, i64, i64)
3918declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
3919
3920define <vscale x 1 x half> @test_vlsseg7_nxv1f16(half* %base, i64 %offset, i64 %vl) {
3921; CHECK-LABEL: test_vlsseg7_nxv1f16:
3922; CHECK:       # %bb.0: # %entry
3923; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
3924; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
3925; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
3926; CHECK-NEXT:    ret
3927entry:
3928  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i64 %offset, i64 %vl)
3929  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
3930  ret <vscale x 1 x half> %1
3931}
3932
3933define <vscale x 1 x half> @test_vlsseg7_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3934; CHECK-LABEL: test_vlsseg7_mask_nxv1f16:
3935; CHECK:       # %bb.0: # %entry
3936; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
3937; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
3938; CHECK-NEXT:    vmv1r.v v8, v7
3939; CHECK-NEXT:    vmv1r.v v9, v7
3940; CHECK-NEXT:    vmv1r.v v10, v7
3941; CHECK-NEXT:    vmv1r.v v11, v7
3942; CHECK-NEXT:    vmv1r.v v12, v7
3943; CHECK-NEXT:    vmv1r.v v13, v7
3944; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
3945; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
3946; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
3947; CHECK-NEXT:    ret
3948entry:
3949  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i64 %offset, i64 %vl)
3950  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
3951  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
3952  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
3953  ret <vscale x 1 x half> %3
3954}
3955
3956declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(half*, i64, i64)
3957declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
3958
3959define <vscale x 1 x half> @test_vlsseg8_nxv1f16(half* %base, i64 %offset, i64 %vl) {
3960; CHECK-LABEL: test_vlsseg8_nxv1f16:
3961; CHECK:       # %bb.0: # %entry
3962; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
3963; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
3964; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
3965; CHECK-NEXT:    ret
3966entry:
3967  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i64 %offset, i64 %vl)
3968  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
3969  ret <vscale x 1 x half> %1
3970}
3971
3972define <vscale x 1 x half> @test_vlsseg8_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3973; CHECK-LABEL: test_vlsseg8_mask_nxv1f16:
3974; CHECK:       # %bb.0: # %entry
3975; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
3976; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
3977; CHECK-NEXT:    vmv1r.v v8, v7
3978; CHECK-NEXT:    vmv1r.v v9, v7
3979; CHECK-NEXT:    vmv1r.v v10, v7
3980; CHECK-NEXT:    vmv1r.v v11, v7
3981; CHECK-NEXT:    vmv1r.v v12, v7
3982; CHECK-NEXT:    vmv1r.v v13, v7
3983; CHECK-NEXT:    vmv1r.v v14, v7
3984; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
3985; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
3986; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
3987; CHECK-NEXT:    ret
3988entry:
3989  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i64 %offset, i64 %vl)
3990  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
3991  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
3992  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
3993  ret <vscale x 1 x half> %3
3994}
3995
3996declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(float*, i64, i64)
3997declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
3998
3999define <vscale x 1 x float> @test_vlsseg2_nxv1f32(float* %base, i64 %offset, i64 %vl) {
4000; CHECK-LABEL: test_vlsseg2_nxv1f32:
4001; CHECK:       # %bb.0: # %entry
4002; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
4003; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
4004; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
4005; CHECK-NEXT:    ret
4006entry:
4007  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i64 %offset, i64 %vl)
4008  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
4009  ret <vscale x 1 x float> %1
4010}
4011
4012define <vscale x 1 x float> @test_vlsseg2_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
4013; CHECK-LABEL: test_vlsseg2_mask_nxv1f32:
4014; CHECK:       # %bb.0: # %entry
4015; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
4016; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
4017; CHECK-NEXT:    vmv1r.v v8, v7
4018; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
4019; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1, v0.t
4020; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
4021; CHECK-NEXT:    ret
4022entry:
4023  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i64 %offset, i64 %vl)
4024  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
4025  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
4026  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
4027  ret <vscale x 1 x float> %3
4028}
4029
4030declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(float*, i64, i64)
4031declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
4032
4033define <vscale x 1 x float> @test_vlsseg3_nxv1f32(float* %base, i64 %offset, i64 %vl) {
4034; CHECK-LABEL: test_vlsseg3_nxv1f32:
4035; CHECK:       # %bb.0: # %entry
4036; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
4037; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
4038; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
4039; CHECK-NEXT:    ret
4040entry:
4041  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i64 %offset, i64 %vl)
4042  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
4043  ret <vscale x 1 x float> %1
4044}
4045
4046define <vscale x 1 x float> @test_vlsseg3_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
4047; CHECK-LABEL: test_vlsseg3_mask_nxv1f32:
4048; CHECK:       # %bb.0: # %entry
4049; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
4050; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
4051; CHECK-NEXT:    vmv1r.v v8, v7
4052; CHECK-NEXT:    vmv1r.v v9, v7
4053; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
4054; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1, v0.t
4055; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
4056; CHECK-NEXT:    ret
4057entry:
4058  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i64 %offset, i64 %vl)
4059  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
4060  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
4061  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
4062  ret <vscale x 1 x float> %3
4063}
4064
4065declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(float*, i64, i64)
4066declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
4067
4068define <vscale x 1 x float> @test_vlsseg4_nxv1f32(float* %base, i64 %offset, i64 %vl) {
4069; CHECK-LABEL: test_vlsseg4_nxv1f32:
4070; CHECK:       # %bb.0: # %entry
4071; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
4072; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
4073; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
4074; CHECK-NEXT:    ret
4075entry:
4076  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i64 %offset, i64 %vl)
4077  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
4078  ret <vscale x 1 x float> %1
4079}
4080
4081define <vscale x 1 x float> @test_vlsseg4_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
4082; CHECK-LABEL: test_vlsseg4_mask_nxv1f32:
4083; CHECK:       # %bb.0: # %entry
4084; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
4085; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
4086; CHECK-NEXT:    vmv1r.v v8, v7
4087; CHECK-NEXT:    vmv1r.v v9, v7
4088; CHECK-NEXT:    vmv1r.v v10, v7
4089; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
4090; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1, v0.t
4091; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
4092; CHECK-NEXT:    ret
4093entry:
4094  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i64 %offset, i64 %vl)
4095  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
4096  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
4097  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
4098  ret <vscale x 1 x float> %3
4099}
4100
4101declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(float*, i64, i64)
4102declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
4103
4104define <vscale x 1 x float> @test_vlsseg5_nxv1f32(float* %base, i64 %offset, i64 %vl) {
4105; CHECK-LABEL: test_vlsseg5_nxv1f32:
4106; CHECK:       # %bb.0: # %entry
4107; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
4108; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
4109; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
4110; CHECK-NEXT:    ret
4111entry:
4112  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i64 %offset, i64 %vl)
4113  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
4114  ret <vscale x 1 x float> %1
4115}
4116
4117define <vscale x 1 x float> @test_vlsseg5_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
4118; CHECK-LABEL: test_vlsseg5_mask_nxv1f32:
4119; CHECK:       # %bb.0: # %entry
4120; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
4121; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
4122; CHECK-NEXT:    vmv1r.v v8, v7
4123; CHECK-NEXT:    vmv1r.v v9, v7
4124; CHECK-NEXT:    vmv1r.v v10, v7
4125; CHECK-NEXT:    vmv1r.v v11, v7
4126; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
4127; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1, v0.t
4128; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
4129; CHECK-NEXT:    ret
4130entry:
4131  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i64 %offset, i64 %vl)
4132  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
4133  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
4134  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
4135  ret <vscale x 1 x float> %3
4136}
4137
4138declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(float*, i64, i64)
4139declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
4140
4141define <vscale x 1 x float> @test_vlsseg6_nxv1f32(float* %base, i64 %offset, i64 %vl) {
4142; CHECK-LABEL: test_vlsseg6_nxv1f32:
4143; CHECK:       # %bb.0: # %entry
4144; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
4145; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
4146; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
4147; CHECK-NEXT:    ret
4148entry:
4149  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i64 %offset, i64 %vl)
4150  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
4151  ret <vscale x 1 x float> %1
4152}
4153
4154define <vscale x 1 x float> @test_vlsseg6_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
4155; CHECK-LABEL: test_vlsseg6_mask_nxv1f32:
4156; CHECK:       # %bb.0: # %entry
4157; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
4158; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
4159; CHECK-NEXT:    vmv1r.v v8, v7
4160; CHECK-NEXT:    vmv1r.v v9, v7
4161; CHECK-NEXT:    vmv1r.v v10, v7
4162; CHECK-NEXT:    vmv1r.v v11, v7
4163; CHECK-NEXT:    vmv1r.v v12, v7
4164; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
4165; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1, v0.t
4166; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
4167; CHECK-NEXT:    ret
4168entry:
4169  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i64 %offset, i64 %vl)
4170  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
4171  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
4172  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
4173  ret <vscale x 1 x float> %3
4174}
4175
4176declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(float*, i64, i64)
4177declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
4178
4179define <vscale x 1 x float> @test_vlsseg7_nxv1f32(float* %base, i64 %offset, i64 %vl) {
4180; CHECK-LABEL: test_vlsseg7_nxv1f32:
4181; CHECK:       # %bb.0: # %entry
4182; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
4183; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
4184; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
4185; CHECK-NEXT:    ret
4186entry:
4187  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i64 %offset, i64 %vl)
4188  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
4189  ret <vscale x 1 x float> %1
4190}
4191
4192define <vscale x 1 x float> @test_vlsseg7_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
4193; CHECK-LABEL: test_vlsseg7_mask_nxv1f32:
4194; CHECK:       # %bb.0: # %entry
4195; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
4196; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
4197; CHECK-NEXT:    vmv1r.v v8, v7
4198; CHECK-NEXT:    vmv1r.v v9, v7
4199; CHECK-NEXT:    vmv1r.v v10, v7
4200; CHECK-NEXT:    vmv1r.v v11, v7
4201; CHECK-NEXT:    vmv1r.v v12, v7
4202; CHECK-NEXT:    vmv1r.v v13, v7
4203; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
4204; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1, v0.t
4205; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
4206; CHECK-NEXT:    ret
4207entry:
4208  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i64 %offset, i64 %vl)
4209  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
4210  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
4211  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
4212  ret <vscale x 1 x float> %3
4213}
4214
4215declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(float*, i64, i64)
4216declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
4217
4218define <vscale x 1 x float> @test_vlsseg8_nxv1f32(float* %base, i64 %offset, i64 %vl) {
4219; CHECK-LABEL: test_vlsseg8_nxv1f32:
4220; CHECK:       # %bb.0: # %entry
4221; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
4222; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
4223; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
4224; CHECK-NEXT:    ret
4225entry:
4226  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i64 %offset, i64 %vl)
4227  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
4228  ret <vscale x 1 x float> %1
4229}
4230
4231define <vscale x 1 x float> @test_vlsseg8_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
4232; CHECK-LABEL: test_vlsseg8_mask_nxv1f32:
4233; CHECK:       # %bb.0: # %entry
4234; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
4235; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
4236; CHECK-NEXT:    vmv1r.v v8, v7
4237; CHECK-NEXT:    vmv1r.v v9, v7
4238; CHECK-NEXT:    vmv1r.v v10, v7
4239; CHECK-NEXT:    vmv1r.v v11, v7
4240; CHECK-NEXT:    vmv1r.v v12, v7
4241; CHECK-NEXT:    vmv1r.v v13, v7
4242; CHECK-NEXT:    vmv1r.v v14, v7
4243; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
4244; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1, v0.t
4245; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
4246; CHECK-NEXT:    ret
4247entry:
4248  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i64 %offset, i64 %vl)
4249  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
4250  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
4251  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
4252  ret <vscale x 1 x float> %3
4253}
4254
4255declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half*, i64, i64)
4256declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, half*, i64, <vscale x 8 x i1>, i64)
4257
4258define <vscale x 8 x half> @test_vlsseg2_nxv8f16(half* %base, i64 %offset, i64 %vl) {
4259; CHECK-LABEL: test_vlsseg2_nxv8f16:
4260; CHECK:       # %bb.0: # %entry
4261; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
4262; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1
4263; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
4264; CHECK-NEXT:    ret
4265entry:
4266  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i64 %offset, i64 %vl)
4267  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
4268  ret <vscale x 8 x half> %1
4269}
4270
4271define <vscale x 8 x half> @test_vlsseg2_mask_nxv8f16(half* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
4272; CHECK-LABEL: test_vlsseg2_mask_nxv8f16:
4273; CHECK:       # %bb.0: # %entry
4274; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
4275; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1
4276; CHECK-NEXT:    vmv2r.v v8, v6
4277; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
4278; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1, v0.t
4279; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
4280; CHECK-NEXT:    ret
4281entry:
4282  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i64 %offset, i64 %vl)
4283  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
4284  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
4285  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
4286  ret <vscale x 8 x half> %3
4287}
4288
4289declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half*, i64, i64)
4290declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i64, <vscale x 8 x i1>, i64)
4291
4292define <vscale x 8 x half> @test_vlsseg3_nxv8f16(half* %base, i64 %offset, i64 %vl) {
4293; CHECK-LABEL: test_vlsseg3_nxv8f16:
4294; CHECK:       # %bb.0: # %entry
4295; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
4296; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1
4297; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
4298; CHECK-NEXT:    ret
4299entry:
4300  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i64 %offset, i64 %vl)
4301  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
4302  ret <vscale x 8 x half> %1
4303}
4304
4305define <vscale x 8 x half> @test_vlsseg3_mask_nxv8f16(half* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
4306; CHECK-LABEL: test_vlsseg3_mask_nxv8f16:
4307; CHECK:       # %bb.0: # %entry
4308; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
4309; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1
4310; CHECK-NEXT:    vmv2r.v v8, v6
4311; CHECK-NEXT:    vmv2r.v v10, v6
4312; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
4313; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1, v0.t
4314; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
4315; CHECK-NEXT:    ret
4316entry:
4317  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i64 %offset, i64 %vl)
4318  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
4319  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
4320  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
4321  ret <vscale x 8 x half> %3
4322}
4323
4324declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half*, i64, i64)
4325declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i64, <vscale x 8 x i1>, i64)
4326
4327define <vscale x 8 x half> @test_vlsseg4_nxv8f16(half* %base, i64 %offset, i64 %vl) {
4328; CHECK-LABEL: test_vlsseg4_nxv8f16:
4329; CHECK:       # %bb.0: # %entry
4330; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
4331; CHECK-NEXT:    vlsseg4e16.v v6, (a0), a1
4332; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
4333; CHECK-NEXT:    ret
4334entry:
4335  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i64 %offset, i64 %vl)
4336  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
4337  ret <vscale x 8 x half> %1
4338}
4339
4340define <vscale x 8 x half> @test_vlsseg4_mask_nxv8f16(half* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
4341; CHECK-LABEL: test_vlsseg4_mask_nxv8f16:
4342; CHECK:       # %bb.0: # %entry
4343; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
4344; CHECK-NEXT:    vlsseg4e16.v v6, (a0), a1
4345; CHECK-NEXT:    vmv2r.v v8, v6
4346; CHECK-NEXT:    vmv2r.v v10, v6
4347; CHECK-NEXT:    vmv2r.v v12, v6
4348; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
4349; CHECK-NEXT:    vlsseg4e16.v v6, (a0), a1, v0.t
4350; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
4351; CHECK-NEXT:    ret
4352entry:
4353  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i64 %offset, i64 %vl)
4354  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
4355  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
4356  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
4357  ret <vscale x 8 x half> %3
4358}
4359
4360declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float*, i64, i64)
4361declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, float*, i64, <vscale x 8 x i1>, i64)
4362
4363define <vscale x 8 x float> @test_vlsseg2_nxv8f32(float* %base, i64 %offset, i64 %vl) {
4364; CHECK-LABEL: test_vlsseg2_nxv8f32:
4365; CHECK:       # %bb.0: # %entry
4366; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
4367; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1
4368; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
4369; CHECK-NEXT:    ret
4370entry:
4371  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i64 %offset, i64 %vl)
4372  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
4373  ret <vscale x 8 x float> %1
4374}
4375
4376define <vscale x 8 x float> @test_vlsseg2_mask_nxv8f32(float* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
4377; CHECK-LABEL: test_vlsseg2_mask_nxv8f32:
4378; CHECK:       # %bb.0: # %entry
4379; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
4380; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1
4381; CHECK-NEXT:    vmv4r.v v8, v4
4382; CHECK-NEXT:    vsetvli zero, zero, e32, m4, tu, mu
4383; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1, v0.t
4384; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
4385; CHECK-NEXT:    ret
4386entry:
4387  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i64 %offset, i64 %vl)
4388  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
4389  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.mask.nxv8f32(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
4390  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
4391  ret <vscale x 8 x float> %3
4392}
4393
4394declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double*, i64, i64)
4395declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, double*, i64, <vscale x 2 x i1>, i64)
4396
4397define <vscale x 2 x double> @test_vlsseg2_nxv2f64(double* %base, i64 %offset, i64 %vl) {
4398; CHECK-LABEL: test_vlsseg2_nxv2f64:
4399; CHECK:       # %bb.0: # %entry
4400; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
4401; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1
4402; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
4403; CHECK-NEXT:    ret
4404entry:
4405  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i64 %offset, i64 %vl)
4406  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
4407  ret <vscale x 2 x double> %1
4408}
4409
4410define <vscale x 2 x double> @test_vlsseg2_mask_nxv2f64(double* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
4411; CHECK-LABEL: test_vlsseg2_mask_nxv2f64:
4412; CHECK:       # %bb.0: # %entry
4413; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
4414; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1
4415; CHECK-NEXT:    vmv2r.v v8, v6
4416; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
4417; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1, v0.t
4418; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
4419; CHECK-NEXT:    ret
4420entry:
4421  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i64 %offset, i64 %vl)
4422  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
4423  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
4424  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
4425  ret <vscale x 2 x double> %3
4426}
4427
4428declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(double*, i64, i64)
4429declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i64, <vscale x 2 x i1>, i64)
4430
4431define <vscale x 2 x double> @test_vlsseg3_nxv2f64(double* %base, i64 %offset, i64 %vl) {
4432; CHECK-LABEL: test_vlsseg3_nxv2f64:
4433; CHECK:       # %bb.0: # %entry
4434; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
4435; CHECK-NEXT:    vlsseg3e64.v v6, (a0), a1
4436; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
4437; CHECK-NEXT:    ret
4438entry:
4439  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i64 %offset, i64 %vl)
4440  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
4441  ret <vscale x 2 x double> %1
4442}
4443
4444define <vscale x 2 x double> @test_vlsseg3_mask_nxv2f64(double* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
4445; CHECK-LABEL: test_vlsseg3_mask_nxv2f64:
4446; CHECK:       # %bb.0: # %entry
4447; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
4448; CHECK-NEXT:    vlsseg3e64.v v6, (a0), a1
4449; CHECK-NEXT:    vmv2r.v v8, v6
4450; CHECK-NEXT:    vmv2r.v v10, v6
4451; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
4452; CHECK-NEXT:    vlsseg3e64.v v6, (a0), a1, v0.t
4453; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
4454; CHECK-NEXT:    ret
4455entry:
4456  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i64 %offset, i64 %vl)
4457  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
4458  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
4459  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
4460  ret <vscale x 2 x double> %3
4461}
4462
4463declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double*, i64, i64)
4464declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i64, <vscale x 2 x i1>, i64)
4465
4466define <vscale x 2 x double> @test_vlsseg4_nxv2f64(double* %base, i64 %offset, i64 %vl) {
4467; CHECK-LABEL: test_vlsseg4_nxv2f64:
4468; CHECK:       # %bb.0: # %entry
4469; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
4470; CHECK-NEXT:    vlsseg4e64.v v6, (a0), a1
4471; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
4472; CHECK-NEXT:    ret
4473entry:
4474  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i64 %offset, i64 %vl)
4475  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
4476  ret <vscale x 2 x double> %1
4477}
4478
4479define <vscale x 2 x double> @test_vlsseg4_mask_nxv2f64(double* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
4480; CHECK-LABEL: test_vlsseg4_mask_nxv2f64:
4481; CHECK:       # %bb.0: # %entry
4482; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
4483; CHECK-NEXT:    vlsseg4e64.v v6, (a0), a1
4484; CHECK-NEXT:    vmv2r.v v8, v6
4485; CHECK-NEXT:    vmv2r.v v10, v6
4486; CHECK-NEXT:    vmv2r.v v12, v6
4487; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
4488; CHECK-NEXT:    vlsseg4e64.v v6, (a0), a1, v0.t
4489; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
4490; CHECK-NEXT:    ret
4491entry:
4492  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i64 %offset, i64 %vl)
4493  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
4494  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
4495  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
4496  ret <vscale x 2 x double> %3
4497}
4498
4499declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half*, i64, i64)
4500declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
4501
4502define <vscale x 4 x half> @test_vlsseg2_nxv4f16(half* %base, i64 %offset, i64 %vl) {
4503; CHECK-LABEL: test_vlsseg2_nxv4f16:
4504; CHECK:       # %bb.0: # %entry
4505; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
4506; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
4507; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
4508; CHECK-NEXT:    ret
4509entry:
4510  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i64 %offset, i64 %vl)
4511  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
4512  ret <vscale x 4 x half> %1
4513}
4514
4515define <vscale x 4 x half> @test_vlsseg2_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
4516; CHECK-LABEL: test_vlsseg2_mask_nxv4f16:
4517; CHECK:       # %bb.0: # %entry
4518; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
4519; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
4520; CHECK-NEXT:    vmv1r.v v8, v7
4521; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
4522; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
4523; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
4524; CHECK-NEXT:    ret
4525entry:
4526  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i64 %offset, i64 %vl)
4527  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
4528  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
4529  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
4530  ret <vscale x 4 x half> %3
4531}
4532
4533declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half*, i64, i64)
4534declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
4535
4536define <vscale x 4 x half> @test_vlsseg3_nxv4f16(half* %base, i64 %offset, i64 %vl) {
4537; CHECK-LABEL: test_vlsseg3_nxv4f16:
4538; CHECK:       # %bb.0: # %entry
4539; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
4540; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
4541; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
4542; CHECK-NEXT:    ret
4543entry:
4544  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i64 %offset, i64 %vl)
4545  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
4546  ret <vscale x 4 x half> %1
4547}
4548
4549define <vscale x 4 x half> @test_vlsseg3_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
4550; CHECK-LABEL: test_vlsseg3_mask_nxv4f16:
4551; CHECK:       # %bb.0: # %entry
4552; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
4553; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
4554; CHECK-NEXT:    vmv1r.v v8, v7
4555; CHECK-NEXT:    vmv1r.v v9, v7
4556; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
4557; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
4558; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
4559; CHECK-NEXT:    ret
4560entry:
4561  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i64 %offset, i64 %vl)
4562  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
4563  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
4564  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
4565  ret <vscale x 4 x half> %3
4566}
4567
4568declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half*, i64, i64)
4569declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
4570
4571define <vscale x 4 x half> @test_vlsseg4_nxv4f16(half* %base, i64 %offset, i64 %vl) {
4572; CHECK-LABEL: test_vlsseg4_nxv4f16:
4573; CHECK:       # %bb.0: # %entry
4574; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
4575; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
4576; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
4577; CHECK-NEXT:    ret
4578entry:
4579  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i64 %offset, i64 %vl)
4580  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
4581  ret <vscale x 4 x half> %1
4582}
4583
4584define <vscale x 4 x half> @test_vlsseg4_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
4585; CHECK-LABEL: test_vlsseg4_mask_nxv4f16:
4586; CHECK:       # %bb.0: # %entry
4587; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
4588; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
4589; CHECK-NEXT:    vmv1r.v v8, v7
4590; CHECK-NEXT:    vmv1r.v v9, v7
4591; CHECK-NEXT:    vmv1r.v v10, v7
4592; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
4593; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
4594; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
4595; CHECK-NEXT:    ret
4596entry:
4597  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i64 %offset, i64 %vl)
4598  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
4599  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
4600  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
4601  ret <vscale x 4 x half> %3
4602}
4603
4604declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half*, i64, i64)
4605declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
4606
4607define <vscale x 4 x half> @test_vlsseg5_nxv4f16(half* %base, i64 %offset, i64 %vl) {
4608; CHECK-LABEL: test_vlsseg5_nxv4f16:
4609; CHECK:       # %bb.0: # %entry
4610; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
4611; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
4612; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
4613; CHECK-NEXT:    ret
4614entry:
4615  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i64 %offset, i64 %vl)
4616  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
4617  ret <vscale x 4 x half> %1
4618}
4619
4620define <vscale x 4 x half> @test_vlsseg5_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
4621; CHECK-LABEL: test_vlsseg5_mask_nxv4f16:
4622; CHECK:       # %bb.0: # %entry
4623; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
4624; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
4625; CHECK-NEXT:    vmv1r.v v8, v7
4626; CHECK-NEXT:    vmv1r.v v9, v7
4627; CHECK-NEXT:    vmv1r.v v10, v7
4628; CHECK-NEXT:    vmv1r.v v11, v7
4629; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
4630; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
4631; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
4632; CHECK-NEXT:    ret
4633entry:
4634  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i64 %offset, i64 %vl)
4635  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
4636  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
4637  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
4638  ret <vscale x 4 x half> %3
4639}
4640
4641declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half*, i64, i64)
4642declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
4643
4644define <vscale x 4 x half> @test_vlsseg6_nxv4f16(half* %base, i64 %offset, i64 %vl) {
4645; CHECK-LABEL: test_vlsseg6_nxv4f16:
4646; CHECK:       # %bb.0: # %entry
4647; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
4648; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
4649; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
4650; CHECK-NEXT:    ret
4651entry:
4652  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i64 %offset, i64 %vl)
4653  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
4654  ret <vscale x 4 x half> %1
4655}
4656
4657define <vscale x 4 x half> @test_vlsseg6_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
4658; CHECK-LABEL: test_vlsseg6_mask_nxv4f16:
4659; CHECK:       # %bb.0: # %entry
4660; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
4661; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
4662; CHECK-NEXT:    vmv1r.v v8, v7
4663; CHECK-NEXT:    vmv1r.v v9, v7
4664; CHECK-NEXT:    vmv1r.v v10, v7
4665; CHECK-NEXT:    vmv1r.v v11, v7
4666; CHECK-NEXT:    vmv1r.v v12, v7
4667; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
4668; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
4669; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
4670; CHECK-NEXT:    ret
4671entry:
4672  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i64 %offset, i64 %vl)
4673  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
4674  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
4675  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
4676  ret <vscale x 4 x half> %3
4677}
4678
4679declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half*, i64, i64)
4680declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
4681
4682define <vscale x 4 x half> @test_vlsseg7_nxv4f16(half* %base, i64 %offset, i64 %vl) {
4683; CHECK-LABEL: test_vlsseg7_nxv4f16:
4684; CHECK:       # %bb.0: # %entry
4685; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
4686; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
4687; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
4688; CHECK-NEXT:    ret
4689entry:
4690  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i64 %offset, i64 %vl)
4691  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
4692  ret <vscale x 4 x half> %1
4693}
4694
4695define <vscale x 4 x half> @test_vlsseg7_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
4696; CHECK-LABEL: test_vlsseg7_mask_nxv4f16:
4697; CHECK:       # %bb.0: # %entry
4698; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
4699; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
4700; CHECK-NEXT:    vmv1r.v v8, v7
4701; CHECK-NEXT:    vmv1r.v v9, v7
4702; CHECK-NEXT:    vmv1r.v v10, v7
4703; CHECK-NEXT:    vmv1r.v v11, v7
4704; CHECK-NEXT:    vmv1r.v v12, v7
4705; CHECK-NEXT:    vmv1r.v v13, v7
4706; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
4707; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
4708; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
4709; CHECK-NEXT:    ret
4710entry:
4711  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i64 %offset, i64 %vl)
4712  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
4713  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
4714  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
4715  ret <vscale x 4 x half> %3
4716}
4717
4718declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half*, i64, i64)
4719declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
4720
4721define <vscale x 4 x half> @test_vlsseg8_nxv4f16(half* %base, i64 %offset, i64 %vl) {
4722; CHECK-LABEL: test_vlsseg8_nxv4f16:
4723; CHECK:       # %bb.0: # %entry
4724; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
4725; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
4726; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
4727; CHECK-NEXT:    ret
4728entry:
4729  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i64 %offset, i64 %vl)
4730  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
4731  ret <vscale x 4 x half> %1
4732}
4733
4734define <vscale x 4 x half> @test_vlsseg8_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
4735; CHECK-LABEL: test_vlsseg8_mask_nxv4f16:
4736; CHECK:       # %bb.0: # %entry
4737; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
4738; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
4739; CHECK-NEXT:    vmv1r.v v8, v7
4740; CHECK-NEXT:    vmv1r.v v9, v7
4741; CHECK-NEXT:    vmv1r.v v10, v7
4742; CHECK-NEXT:    vmv1r.v v11, v7
4743; CHECK-NEXT:    vmv1r.v v12, v7
4744; CHECK-NEXT:    vmv1r.v v13, v7
4745; CHECK-NEXT:    vmv1r.v v14, v7
4746; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
4747; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
4748; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
4749; CHECK-NEXT:    ret
4750entry:
4751  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i64 %offset, i64 %vl)
4752  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
4753  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
4754  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
4755  ret <vscale x 4 x half> %3
4756}
4757
4758declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half*, i64, i64)
4759declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
4760
4761define <vscale x 2 x half> @test_vlsseg2_nxv2f16(half* %base, i64 %offset, i64 %vl) {
4762; CHECK-LABEL: test_vlsseg2_nxv2f16:
4763; CHECK:       # %bb.0: # %entry
4764; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
4765; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
4766; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
4767; CHECK-NEXT:    ret
4768entry:
4769  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i64 %offset, i64 %vl)
4770  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4771  ret <vscale x 2 x half> %1
4772}
4773
4774define <vscale x 2 x half> @test_vlsseg2_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
4775; CHECK-LABEL: test_vlsseg2_mask_nxv2f16:
4776; CHECK:       # %bb.0: # %entry
4777; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
4778; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
4779; CHECK-NEXT:    vmv1r.v v8, v7
4780; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
4781; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
4782; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
4783; CHECK-NEXT:    ret
4784entry:
4785  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i64 %offset, i64 %vl)
4786  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
4787  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
4788  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
4789  ret <vscale x 2 x half> %3
4790}
4791
4792declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half*, i64, i64)
4793declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
4794
4795define <vscale x 2 x half> @test_vlsseg3_nxv2f16(half* %base, i64 %offset, i64 %vl) {
4796; CHECK-LABEL: test_vlsseg3_nxv2f16:
4797; CHECK:       # %bb.0: # %entry
4798; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
4799; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
4800; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
4801; CHECK-NEXT:    ret
4802entry:
4803  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i64 %offset, i64 %vl)
4804  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4805  ret <vscale x 2 x half> %1
4806}
4807
4808define <vscale x 2 x half> @test_vlsseg3_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
4809; CHECK-LABEL: test_vlsseg3_mask_nxv2f16:
4810; CHECK:       # %bb.0: # %entry
4811; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
4812; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
4813; CHECK-NEXT:    vmv1r.v v8, v7
4814; CHECK-NEXT:    vmv1r.v v9, v7
4815; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
4816; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
4817; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
4818; CHECK-NEXT:    ret
4819entry:
4820  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i64 %offset, i64 %vl)
4821  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
4822  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
4823  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
4824  ret <vscale x 2 x half> %3
4825}
4826
4827declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half*, i64, i64)
4828declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
4829
4830define <vscale x 2 x half> @test_vlsseg4_nxv2f16(half* %base, i64 %offset, i64 %vl) {
4831; CHECK-LABEL: test_vlsseg4_nxv2f16:
4832; CHECK:       # %bb.0: # %entry
4833; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
4834; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
4835; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
4836; CHECK-NEXT:    ret
4837entry:
4838  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i64 %offset, i64 %vl)
4839  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4840  ret <vscale x 2 x half> %1
4841}
4842
4843define <vscale x 2 x half> @test_vlsseg4_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
4844; CHECK-LABEL: test_vlsseg4_mask_nxv2f16:
4845; CHECK:       # %bb.0: # %entry
4846; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
4847; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
4848; CHECK-NEXT:    vmv1r.v v8, v7
4849; CHECK-NEXT:    vmv1r.v v9, v7
4850; CHECK-NEXT:    vmv1r.v v10, v7
4851; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
4852; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
4853; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
4854; CHECK-NEXT:    ret
4855entry:
4856  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i64 %offset, i64 %vl)
4857  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
4858  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
4859  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
4860  ret <vscale x 2 x half> %3
4861}
4862
4863declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half*, i64, i64)
4864declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
4865
4866define <vscale x 2 x half> @test_vlsseg5_nxv2f16(half* %base, i64 %offset, i64 %vl) {
4867; CHECK-LABEL: test_vlsseg5_nxv2f16:
4868; CHECK:       # %bb.0: # %entry
4869; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
4870; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
4871; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
4872; CHECK-NEXT:    ret
4873entry:
4874  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i64 %offset, i64 %vl)
4875  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4876  ret <vscale x 2 x half> %1
4877}
4878
4879define <vscale x 2 x half> @test_vlsseg5_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
4880; CHECK-LABEL: test_vlsseg5_mask_nxv2f16:
4881; CHECK:       # %bb.0: # %entry
4882; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
4883; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
4884; CHECK-NEXT:    vmv1r.v v8, v7
4885; CHECK-NEXT:    vmv1r.v v9, v7
4886; CHECK-NEXT:    vmv1r.v v10, v7
4887; CHECK-NEXT:    vmv1r.v v11, v7
4888; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
4889; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
4890; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
4891; CHECK-NEXT:    ret
4892entry:
4893  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i64 %offset, i64 %vl)
4894  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
4895  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
4896  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
4897  ret <vscale x 2 x half> %3
4898}
4899
4900declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half*, i64, i64)
4901declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
4902
4903define <vscale x 2 x half> @test_vlsseg6_nxv2f16(half* %base, i64 %offset, i64 %vl) {
4904; CHECK-LABEL: test_vlsseg6_nxv2f16:
4905; CHECK:       # %bb.0: # %entry
4906; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
4907; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
4908; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
4909; CHECK-NEXT:    ret
4910entry:
4911  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i64 %offset, i64 %vl)
4912  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4913  ret <vscale x 2 x half> %1
4914}
4915
4916define <vscale x 2 x half> @test_vlsseg6_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
4917; CHECK-LABEL: test_vlsseg6_mask_nxv2f16:
4918; CHECK:       # %bb.0: # %entry
4919; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
4920; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
4921; CHECK-NEXT:    vmv1r.v v8, v7
4922; CHECK-NEXT:    vmv1r.v v9, v7
4923; CHECK-NEXT:    vmv1r.v v10, v7
4924; CHECK-NEXT:    vmv1r.v v11, v7
4925; CHECK-NEXT:    vmv1r.v v12, v7
4926; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
4927; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
4928; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
4929; CHECK-NEXT:    ret
4930entry:
4931  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i64 %offset, i64 %vl)
4932  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
4933  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
4934  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
4935  ret <vscale x 2 x half> %3
4936}
4937
4938declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half*, i64, i64)
4939declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
4940
4941define <vscale x 2 x half> @test_vlsseg7_nxv2f16(half* %base, i64 %offset, i64 %vl) {
4942; CHECK-LABEL: test_vlsseg7_nxv2f16:
4943; CHECK:       # %bb.0: # %entry
4944; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
4945; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
4946; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
4947; CHECK-NEXT:    ret
4948entry:
4949  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i64 %offset, i64 %vl)
4950  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4951  ret <vscale x 2 x half> %1
4952}
4953
4954define <vscale x 2 x half> @test_vlsseg7_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
4955; CHECK-LABEL: test_vlsseg7_mask_nxv2f16:
4956; CHECK:       # %bb.0: # %entry
4957; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
4958; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
4959; CHECK-NEXT:    vmv1r.v v8, v7
4960; CHECK-NEXT:    vmv1r.v v9, v7
4961; CHECK-NEXT:    vmv1r.v v10, v7
4962; CHECK-NEXT:    vmv1r.v v11, v7
4963; CHECK-NEXT:    vmv1r.v v12, v7
4964; CHECK-NEXT:    vmv1r.v v13, v7
4965; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
4966; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
4967; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
4968; CHECK-NEXT:    ret
4969entry:
4970  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i64 %offset, i64 %vl)
4971  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
4972  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
4973  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
4974  ret <vscale x 2 x half> %3
4975}
4976
4977declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half*, i64, i64)
4978declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
4979
4980define <vscale x 2 x half> @test_vlsseg8_nxv2f16(half* %base, i64 %offset, i64 %vl) {
4981; CHECK-LABEL: test_vlsseg8_nxv2f16:
4982; CHECK:       # %bb.0: # %entry
4983; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
4984; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
4985; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
4986; CHECK-NEXT:    ret
4987entry:
4988  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i64 %offset, i64 %vl)
4989  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4990  ret <vscale x 2 x half> %1
4991}
4992
4993define <vscale x 2 x half> @test_vlsseg8_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
4994; CHECK-LABEL: test_vlsseg8_mask_nxv2f16:
4995; CHECK:       # %bb.0: # %entry
4996; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
4997; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
4998; CHECK-NEXT:    vmv1r.v v8, v7
4999; CHECK-NEXT:    vmv1r.v v9, v7
5000; CHECK-NEXT:    vmv1r.v v10, v7
5001; CHECK-NEXT:    vmv1r.v v11, v7
5002; CHECK-NEXT:    vmv1r.v v12, v7
5003; CHECK-NEXT:    vmv1r.v v13, v7
5004; CHECK-NEXT:    vmv1r.v v14, v7
5005; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
5006; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
5007; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
5008; CHECK-NEXT:    ret
5009entry:
5010  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i64 %offset, i64 %vl)
5011  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
5012  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
5013  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
5014  ret <vscale x 2 x half> %3
5015}
5016
5017declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float*, i64, i64)
5018declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, <vscale x 4 x i1>, i64)
5019
5020define <vscale x 4 x float> @test_vlsseg2_nxv4f32(float* %base, i64 %offset, i64 %vl) {
5021; CHECK-LABEL: test_vlsseg2_nxv4f32:
5022; CHECK:       # %bb.0: # %entry
5023; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
5024; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1
5025; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
5026; CHECK-NEXT:    ret
5027entry:
5028  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i64 %offset, i64 %vl)
5029  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
5030  ret <vscale x 4 x float> %1
5031}
5032
5033define <vscale x 4 x float> @test_vlsseg2_mask_nxv4f32(float* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
5034; CHECK-LABEL: test_vlsseg2_mask_nxv4f32:
5035; CHECK:       # %bb.0: # %entry
5036; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
5037; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1
5038; CHECK-NEXT:    vmv2r.v v8, v6
5039; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
5040; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1, v0.t
5041; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
5042; CHECK-NEXT:    ret
5043entry:
5044  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i64 %offset, i64 %vl)
5045  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
5046  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
5047  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
5048  ret <vscale x 4 x float> %3
5049}
5050
5051declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float*, i64, i64)
5052declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, <vscale x 4 x i1>, i64)
5053
5054define <vscale x 4 x float> @test_vlsseg3_nxv4f32(float* %base, i64 %offset, i64 %vl) {
5055; CHECK-LABEL: test_vlsseg3_nxv4f32:
5056; CHECK:       # %bb.0: # %entry
5057; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
5058; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1
5059; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
5060; CHECK-NEXT:    ret
5061entry:
5062  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i64 %offset, i64 %vl)
5063  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
5064  ret <vscale x 4 x float> %1
5065}
5066
5067define <vscale x 4 x float> @test_vlsseg3_mask_nxv4f32(float* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
5068; CHECK-LABEL: test_vlsseg3_mask_nxv4f32:
5069; CHECK:       # %bb.0: # %entry
5070; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
5071; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1
5072; CHECK-NEXT:    vmv2r.v v8, v6
5073; CHECK-NEXT:    vmv2r.v v10, v6
5074; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
5075; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1, v0.t
5076; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
5077; CHECK-NEXT:    ret
5078entry:
5079  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i64 %offset, i64 %vl)
5080  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
5081  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
5082  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
5083  ret <vscale x 4 x float> %3
5084}
5085
5086declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float*, i64, i64)
5087declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, <vscale x 4 x i1>, i64)
5088
5089define <vscale x 4 x float> @test_vlsseg4_nxv4f32(float* %base, i64 %offset, i64 %vl) {
5090; CHECK-LABEL: test_vlsseg4_nxv4f32:
5091; CHECK:       # %bb.0: # %entry
5092; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
5093; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1
5094; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
5095; CHECK-NEXT:    ret
5096entry:
5097  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i64 %offset, i64 %vl)
5098  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
5099  ret <vscale x 4 x float> %1
5100}
5101
5102define <vscale x 4 x float> @test_vlsseg4_mask_nxv4f32(float* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
5103; CHECK-LABEL: test_vlsseg4_mask_nxv4f32:
5104; CHECK:       # %bb.0: # %entry
5105; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
5106; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1
5107; CHECK-NEXT:    vmv2r.v v8, v6
5108; CHECK-NEXT:    vmv2r.v v10, v6
5109; CHECK-NEXT:    vmv2r.v v12, v6
5110; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
5111; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1, v0.t
5112; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
5113; CHECK-NEXT:    ret
5114entry:
5115  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i64 %offset, i64 %vl)
5116  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
5117  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
5118  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
5119  ret <vscale x 4 x float> %3
5120}
5121