1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
3; RUN:     -verify-machineinstrs < %s | FileCheck %s
4
5declare void @llvm.riscv.vsseg2.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16* , i32)
6declare void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i32)
7
8define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i32 %vl) {
9; CHECK-LABEL: test_vsseg2_nxv16i16:
10; CHECK:       # %bb.0: # %entry
11; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
12; CHECK-NEXT:    vmv4r.v v12, v8
13; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
14; CHECK-NEXT:    vsseg2e16.v v8, (a0)
15; CHECK-NEXT:    ret
16entry:
17  tail call void @llvm.riscv.vsseg2.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, i32 %vl)
18  ret void
19}
20
21define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl) {
22; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
23; CHECK:       # %bb.0: # %entry
24; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
25; CHECK-NEXT:    vmv4r.v v12, v8
26; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
27; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
28; CHECK-NEXT:    ret
29entry:
30  tail call void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl)
31  ret void
32}
33
34declare void @llvm.riscv.vsseg2.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8* , i32)
35declare void @llvm.riscv.vsseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32)
36
37define void @test_vsseg2_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %vl) {
38; CHECK-LABEL: test_vsseg2_nxv1i8:
39; CHECK:       # %bb.0: # %entry
40; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
41; CHECK-NEXT:    vmv1r.v v9, v8
42; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
43; CHECK-NEXT:    vsseg2e8.v v8, (a0)
44; CHECK-NEXT:    ret
45entry:
46  tail call void @llvm.riscv.vsseg2.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %vl)
47  ret void
48}
49
50define void @test_vsseg2_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl) {
51; CHECK-LABEL: test_vsseg2_mask_nxv1i8:
52; CHECK:       # %bb.0: # %entry
53; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
54; CHECK-NEXT:    vmv1r.v v9, v8
55; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
56; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
57; CHECK-NEXT:    ret
58entry:
59  tail call void @llvm.riscv.vsseg2.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl)
60  ret void
61}
62
63declare void @llvm.riscv.vsseg3.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8* , i32)
64declare void @llvm.riscv.vsseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32)
65
66define void @test_vsseg3_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %vl) {
67; CHECK-LABEL: test_vsseg3_nxv1i8:
68; CHECK:       # %bb.0: # %entry
69; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
70; CHECK-NEXT:    vmv1r.v v9, v8
71; CHECK-NEXT:    vmv1r.v v10, v8
72; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
73; CHECK-NEXT:    vsseg3e8.v v8, (a0)
74; CHECK-NEXT:    ret
75entry:
76  tail call void @llvm.riscv.vsseg3.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %vl)
77  ret void
78}
79
80define void @test_vsseg3_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl) {
81; CHECK-LABEL: test_vsseg3_mask_nxv1i8:
82; CHECK:       # %bb.0: # %entry
83; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
84; CHECK-NEXT:    vmv1r.v v9, v8
85; CHECK-NEXT:    vmv1r.v v10, v8
86; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
87; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
88; CHECK-NEXT:    ret
89entry:
90  tail call void @llvm.riscv.vsseg3.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl)
91  ret void
92}
93
94declare void @llvm.riscv.vsseg4.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8* , i32)
95declare void @llvm.riscv.vsseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32)
96
97define void @test_vsseg4_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %vl) {
98; CHECK-LABEL: test_vsseg4_nxv1i8:
99; CHECK:       # %bb.0: # %entry
100; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
101; CHECK-NEXT:    vmv1r.v v9, v8
102; CHECK-NEXT:    vmv1r.v v10, v8
103; CHECK-NEXT:    vmv1r.v v11, v8
104; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
105; CHECK-NEXT:    vsseg4e8.v v8, (a0)
106; CHECK-NEXT:    ret
107entry:
108  tail call void @llvm.riscv.vsseg4.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %vl)
109  ret void
110}
111
112define void @test_vsseg4_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl) {
113; CHECK-LABEL: test_vsseg4_mask_nxv1i8:
114; CHECK:       # %bb.0: # %entry
115; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
116; CHECK-NEXT:    vmv1r.v v9, v8
117; CHECK-NEXT:    vmv1r.v v10, v8
118; CHECK-NEXT:    vmv1r.v v11, v8
119; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
120; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
121; CHECK-NEXT:    ret
122entry:
123  tail call void @llvm.riscv.vsseg4.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl)
124  ret void
125}
126
127declare void @llvm.riscv.vsseg5.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8* , i32)
128declare void @llvm.riscv.vsseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32)
129
130define void @test_vsseg5_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %vl) {
131; CHECK-LABEL: test_vsseg5_nxv1i8:
132; CHECK:       # %bb.0: # %entry
133; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
134; CHECK-NEXT:    vmv1r.v v9, v8
135; CHECK-NEXT:    vmv1r.v v10, v8
136; CHECK-NEXT:    vmv1r.v v11, v8
137; CHECK-NEXT:    vmv1r.v v12, v8
138; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
139; CHECK-NEXT:    vsseg5e8.v v8, (a0)
140; CHECK-NEXT:    ret
141entry:
142  tail call void @llvm.riscv.vsseg5.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %vl)
143  ret void
144}
145
146define void @test_vsseg5_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl) {
147; CHECK-LABEL: test_vsseg5_mask_nxv1i8:
148; CHECK:       # %bb.0: # %entry
149; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
150; CHECK-NEXT:    vmv1r.v v9, v8
151; CHECK-NEXT:    vmv1r.v v10, v8
152; CHECK-NEXT:    vmv1r.v v11, v8
153; CHECK-NEXT:    vmv1r.v v12, v8
154; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
155; CHECK-NEXT:    vsseg5e8.v v8, (a0), v0.t
156; CHECK-NEXT:    ret
157entry:
158  tail call void @llvm.riscv.vsseg5.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl)
159  ret void
160}
161
162declare void @llvm.riscv.vsseg6.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8* , i32)
163declare void @llvm.riscv.vsseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32)
164
165define void @test_vsseg6_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %vl) {
166; CHECK-LABEL: test_vsseg6_nxv1i8:
167; CHECK:       # %bb.0: # %entry
168; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
169; CHECK-NEXT:    vmv1r.v v9, v8
170; CHECK-NEXT:    vmv1r.v v10, v8
171; CHECK-NEXT:    vmv1r.v v11, v8
172; CHECK-NEXT:    vmv1r.v v12, v8
173; CHECK-NEXT:    vmv1r.v v13, v8
174; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
175; CHECK-NEXT:    vsseg6e8.v v8, (a0)
176; CHECK-NEXT:    ret
177entry:
178  tail call void @llvm.riscv.vsseg6.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %vl)
179  ret void
180}
181
182define void @test_vsseg6_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl) {
183; CHECK-LABEL: test_vsseg6_mask_nxv1i8:
184; CHECK:       # %bb.0: # %entry
185; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
186; CHECK-NEXT:    vmv1r.v v9, v8
187; CHECK-NEXT:    vmv1r.v v10, v8
188; CHECK-NEXT:    vmv1r.v v11, v8
189; CHECK-NEXT:    vmv1r.v v12, v8
190; CHECK-NEXT:    vmv1r.v v13, v8
191; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
192; CHECK-NEXT:    vsseg6e8.v v8, (a0), v0.t
193; CHECK-NEXT:    ret
194entry:
195  tail call void @llvm.riscv.vsseg6.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl)
196  ret void
197}
198
199declare void @llvm.riscv.vsseg7.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8* , i32)
200declare void @llvm.riscv.vsseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32)
201
202define void @test_vsseg7_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %vl) {
203; CHECK-LABEL: test_vsseg7_nxv1i8:
204; CHECK:       # %bb.0: # %entry
205; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
206; CHECK-NEXT:    vmv1r.v v9, v8
207; CHECK-NEXT:    vmv1r.v v10, v8
208; CHECK-NEXT:    vmv1r.v v11, v8
209; CHECK-NEXT:    vmv1r.v v12, v8
210; CHECK-NEXT:    vmv1r.v v13, v8
211; CHECK-NEXT:    vmv1r.v v14, v8
212; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
213; CHECK-NEXT:    vsseg7e8.v v8, (a0)
214; CHECK-NEXT:    ret
215entry:
216  tail call void @llvm.riscv.vsseg7.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %vl)
217  ret void
218}
219
220define void @test_vsseg7_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl) {
221; CHECK-LABEL: test_vsseg7_mask_nxv1i8:
222; CHECK:       # %bb.0: # %entry
223; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
224; CHECK-NEXT:    vmv1r.v v9, v8
225; CHECK-NEXT:    vmv1r.v v10, v8
226; CHECK-NEXT:    vmv1r.v v11, v8
227; CHECK-NEXT:    vmv1r.v v12, v8
228; CHECK-NEXT:    vmv1r.v v13, v8
229; CHECK-NEXT:    vmv1r.v v14, v8
230; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
231; CHECK-NEXT:    vsseg7e8.v v8, (a0), v0.t
232; CHECK-NEXT:    ret
233entry:
234  tail call void @llvm.riscv.vsseg7.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl)
235  ret void
236}
237
238declare void @llvm.riscv.vsseg8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8* , i32)
239declare void @llvm.riscv.vsseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32)
240
241define void @test_vsseg8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %vl) {
242; CHECK-LABEL: test_vsseg8_nxv1i8:
243; CHECK:       # %bb.0: # %entry
244; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
245; CHECK-NEXT:    vmv1r.v v9, v8
246; CHECK-NEXT:    vmv1r.v v10, v8
247; CHECK-NEXT:    vmv1r.v v11, v8
248; CHECK-NEXT:    vmv1r.v v12, v8
249; CHECK-NEXT:    vmv1r.v v13, v8
250; CHECK-NEXT:    vmv1r.v v14, v8
251; CHECK-NEXT:    vmv1r.v v15, v8
252; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
253; CHECK-NEXT:    vsseg8e8.v v8, (a0)
254; CHECK-NEXT:    ret
255entry:
256  tail call void @llvm.riscv.vsseg8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %vl)
257  ret void
258}
259
260define void @test_vsseg8_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl) {
261; CHECK-LABEL: test_vsseg8_mask_nxv1i8:
262; CHECK:       # %bb.0: # %entry
263; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
264; CHECK-NEXT:    vmv1r.v v9, v8
265; CHECK-NEXT:    vmv1r.v v10, v8
266; CHECK-NEXT:    vmv1r.v v11, v8
267; CHECK-NEXT:    vmv1r.v v12, v8
268; CHECK-NEXT:    vmv1r.v v13, v8
269; CHECK-NEXT:    vmv1r.v v14, v8
270; CHECK-NEXT:    vmv1r.v v15, v8
271; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
272; CHECK-NEXT:    vsseg8e8.v v8, (a0), v0.t
273; CHECK-NEXT:    ret
274entry:
275  tail call void @llvm.riscv.vsseg8.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl)
276  ret void
277}
278
279declare void @llvm.riscv.vsseg2.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8* , i32)
280declare void @llvm.riscv.vsseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i32)
281
282define void @test_vsseg2_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %vl) {
283; CHECK-LABEL: test_vsseg2_nxv16i8:
284; CHECK:       # %bb.0: # %entry
285; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
286; CHECK-NEXT:    vmv2r.v v10, v8
287; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
288; CHECK-NEXT:    vsseg2e8.v v8, (a0)
289; CHECK-NEXT:    ret
290entry:
291  tail call void @llvm.riscv.vsseg2.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i32 %vl)
292  ret void
293}
294
295define void @test_vsseg2_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl) {
296; CHECK-LABEL: test_vsseg2_mask_nxv16i8:
297; CHECK:       # %bb.0: # %entry
298; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
299; CHECK-NEXT:    vmv2r.v v10, v8
300; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
301; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
302; CHECK-NEXT:    ret
303entry:
304  tail call void @llvm.riscv.vsseg2.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl)
305  ret void
306}
307
308declare void @llvm.riscv.vsseg3.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8* , i32)
309declare void @llvm.riscv.vsseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i32)
310
311define void @test_vsseg3_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %vl) {
312; CHECK-LABEL: test_vsseg3_nxv16i8:
313; CHECK:       # %bb.0: # %entry
314; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
315; CHECK-NEXT:    vmv2r.v v10, v8
316; CHECK-NEXT:    vmv2r.v v12, v8
317; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
318; CHECK-NEXT:    vsseg3e8.v v8, (a0)
319; CHECK-NEXT:    ret
320entry:
321  tail call void @llvm.riscv.vsseg3.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i32 %vl)
322  ret void
323}
324
325define void @test_vsseg3_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl) {
326; CHECK-LABEL: test_vsseg3_mask_nxv16i8:
327; CHECK:       # %bb.0: # %entry
328; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
329; CHECK-NEXT:    vmv2r.v v10, v8
330; CHECK-NEXT:    vmv2r.v v12, v8
331; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
332; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
333; CHECK-NEXT:    ret
334entry:
335  tail call void @llvm.riscv.vsseg3.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl)
336  ret void
337}
338
339declare void @llvm.riscv.vsseg4.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8* , i32)
340declare void @llvm.riscv.vsseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i32)
341
342define void @test_vsseg4_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %vl) {
343; CHECK-LABEL: test_vsseg4_nxv16i8:
344; CHECK:       # %bb.0: # %entry
345; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
346; CHECK-NEXT:    vmv2r.v v10, v8
347; CHECK-NEXT:    vmv2r.v v12, v8
348; CHECK-NEXT:    vmv2r.v v14, v8
349; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
350; CHECK-NEXT:    vsseg4e8.v v8, (a0)
351; CHECK-NEXT:    ret
352entry:
353  tail call void @llvm.riscv.vsseg4.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i32 %vl)
354  ret void
355}
356
357define void @test_vsseg4_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl) {
358; CHECK-LABEL: test_vsseg4_mask_nxv16i8:
359; CHECK:       # %bb.0: # %entry
360; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
361; CHECK-NEXT:    vmv2r.v v10, v8
362; CHECK-NEXT:    vmv2r.v v12, v8
363; CHECK-NEXT:    vmv2r.v v14, v8
364; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
365; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
366; CHECK-NEXT:    ret
367entry:
368  tail call void @llvm.riscv.vsseg4.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl)
369  ret void
370}
371
372declare void @llvm.riscv.vsseg2.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32* , i32)
373declare void @llvm.riscv.vsseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32)
374
375define void @test_vsseg2_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl) {
376; CHECK-LABEL: test_vsseg2_nxv2i32:
377; CHECK:       # %bb.0: # %entry
378; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
379; CHECK-NEXT:    vmv1r.v v9, v8
380; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
381; CHECK-NEXT:    vsseg2e32.v v8, (a0)
382; CHECK-NEXT:    ret
383entry:
384  tail call void @llvm.riscv.vsseg2.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %vl)
385  ret void
386}
387
388define void @test_vsseg2_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl) {
389; CHECK-LABEL: test_vsseg2_mask_nxv2i32:
390; CHECK:       # %bb.0: # %entry
391; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
392; CHECK-NEXT:    vmv1r.v v9, v8
393; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
394; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
395; CHECK-NEXT:    ret
396entry:
397  tail call void @llvm.riscv.vsseg2.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl)
398  ret void
399}
400
401declare void @llvm.riscv.vsseg3.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32* , i32)
402declare void @llvm.riscv.vsseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32)
403
404define void @test_vsseg3_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl) {
405; CHECK-LABEL: test_vsseg3_nxv2i32:
406; CHECK:       # %bb.0: # %entry
407; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
408; CHECK-NEXT:    vmv1r.v v9, v8
409; CHECK-NEXT:    vmv1r.v v10, v8
410; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
411; CHECK-NEXT:    vsseg3e32.v v8, (a0)
412; CHECK-NEXT:    ret
413entry:
414  tail call void @llvm.riscv.vsseg3.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %vl)
415  ret void
416}
417
418define void @test_vsseg3_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl) {
419; CHECK-LABEL: test_vsseg3_mask_nxv2i32:
420; CHECK:       # %bb.0: # %entry
421; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
422; CHECK-NEXT:    vmv1r.v v9, v8
423; CHECK-NEXT:    vmv1r.v v10, v8
424; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
425; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
426; CHECK-NEXT:    ret
427entry:
428  tail call void @llvm.riscv.vsseg3.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl)
429  ret void
430}
431
432declare void @llvm.riscv.vsseg4.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32* , i32)
433declare void @llvm.riscv.vsseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32)
434
435define void @test_vsseg4_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl) {
436; CHECK-LABEL: test_vsseg4_nxv2i32:
437; CHECK:       # %bb.0: # %entry
438; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
439; CHECK-NEXT:    vmv1r.v v9, v8
440; CHECK-NEXT:    vmv1r.v v10, v8
441; CHECK-NEXT:    vmv1r.v v11, v8
442; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
443; CHECK-NEXT:    vsseg4e32.v v8, (a0)
444; CHECK-NEXT:    ret
445entry:
446  tail call void @llvm.riscv.vsseg4.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %vl)
447  ret void
448}
449
450define void @test_vsseg4_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl) {
451; CHECK-LABEL: test_vsseg4_mask_nxv2i32:
452; CHECK:       # %bb.0: # %entry
453; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
454; CHECK-NEXT:    vmv1r.v v9, v8
455; CHECK-NEXT:    vmv1r.v v10, v8
456; CHECK-NEXT:    vmv1r.v v11, v8
457; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
458; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
459; CHECK-NEXT:    ret
460entry:
461  tail call void @llvm.riscv.vsseg4.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl)
462  ret void
463}
464
465declare void @llvm.riscv.vsseg5.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32* , i32)
466declare void @llvm.riscv.vsseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32)
467
468define void @test_vsseg5_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl) {
469; CHECK-LABEL: test_vsseg5_nxv2i32:
470; CHECK:       # %bb.0: # %entry
471; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
472; CHECK-NEXT:    vmv1r.v v9, v8
473; CHECK-NEXT:    vmv1r.v v10, v8
474; CHECK-NEXT:    vmv1r.v v11, v8
475; CHECK-NEXT:    vmv1r.v v12, v8
476; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
477; CHECK-NEXT:    vsseg5e32.v v8, (a0)
478; CHECK-NEXT:    ret
479entry:
480  tail call void @llvm.riscv.vsseg5.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %vl)
481  ret void
482}
483
484define void @test_vsseg5_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl) {
485; CHECK-LABEL: test_vsseg5_mask_nxv2i32:
486; CHECK:       # %bb.0: # %entry
487; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
488; CHECK-NEXT:    vmv1r.v v9, v8
489; CHECK-NEXT:    vmv1r.v v10, v8
490; CHECK-NEXT:    vmv1r.v v11, v8
491; CHECK-NEXT:    vmv1r.v v12, v8
492; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
493; CHECK-NEXT:    vsseg5e32.v v8, (a0), v0.t
494; CHECK-NEXT:    ret
495entry:
496  tail call void @llvm.riscv.vsseg5.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl)
497  ret void
498}
499
500declare void @llvm.riscv.vsseg6.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32* , i32)
501declare void @llvm.riscv.vsseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32)
502
503define void @test_vsseg6_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl) {
504; CHECK-LABEL: test_vsseg6_nxv2i32:
505; CHECK:       # %bb.0: # %entry
506; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
507; CHECK-NEXT:    vmv1r.v v9, v8
508; CHECK-NEXT:    vmv1r.v v10, v8
509; CHECK-NEXT:    vmv1r.v v11, v8
510; CHECK-NEXT:    vmv1r.v v12, v8
511; CHECK-NEXT:    vmv1r.v v13, v8
512; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
513; CHECK-NEXT:    vsseg6e32.v v8, (a0)
514; CHECK-NEXT:    ret
515entry:
516  tail call void @llvm.riscv.vsseg6.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %vl)
517  ret void
518}
519
520define void @test_vsseg6_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl) {
521; CHECK-LABEL: test_vsseg6_mask_nxv2i32:
522; CHECK:       # %bb.0: # %entry
523; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
524; CHECK-NEXT:    vmv1r.v v9, v8
525; CHECK-NEXT:    vmv1r.v v10, v8
526; CHECK-NEXT:    vmv1r.v v11, v8
527; CHECK-NEXT:    vmv1r.v v12, v8
528; CHECK-NEXT:    vmv1r.v v13, v8
529; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
530; CHECK-NEXT:    vsseg6e32.v v8, (a0), v0.t
531; CHECK-NEXT:    ret
532entry:
533  tail call void @llvm.riscv.vsseg6.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl)
534  ret void
535}
536
537declare void @llvm.riscv.vsseg7.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32* , i32)
538declare void @llvm.riscv.vsseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32)
539
540define void @test_vsseg7_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl) {
541; CHECK-LABEL: test_vsseg7_nxv2i32:
542; CHECK:       # %bb.0: # %entry
543; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
544; CHECK-NEXT:    vmv1r.v v9, v8
545; CHECK-NEXT:    vmv1r.v v10, v8
546; CHECK-NEXT:    vmv1r.v v11, v8
547; CHECK-NEXT:    vmv1r.v v12, v8
548; CHECK-NEXT:    vmv1r.v v13, v8
549; CHECK-NEXT:    vmv1r.v v14, v8
550; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
551; CHECK-NEXT:    vsseg7e32.v v8, (a0)
552; CHECK-NEXT:    ret
553entry:
554  tail call void @llvm.riscv.vsseg7.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %vl)
555  ret void
556}
557
558define void @test_vsseg7_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl) {
559; CHECK-LABEL: test_vsseg7_mask_nxv2i32:
560; CHECK:       # %bb.0: # %entry
561; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
562; CHECK-NEXT:    vmv1r.v v9, v8
563; CHECK-NEXT:    vmv1r.v v10, v8
564; CHECK-NEXT:    vmv1r.v v11, v8
565; CHECK-NEXT:    vmv1r.v v12, v8
566; CHECK-NEXT:    vmv1r.v v13, v8
567; CHECK-NEXT:    vmv1r.v v14, v8
568; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
569; CHECK-NEXT:    vsseg7e32.v v8, (a0), v0.t
570; CHECK-NEXT:    ret
571entry:
572  tail call void @llvm.riscv.vsseg7.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl)
573  ret void
574}
575
576declare void @llvm.riscv.vsseg8.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32* , i32)
577declare void @llvm.riscv.vsseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32)
578
579define void @test_vsseg8_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl) {
580; CHECK-LABEL: test_vsseg8_nxv2i32:
581; CHECK:       # %bb.0: # %entry
582; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
583; CHECK-NEXT:    vmv1r.v v9, v8
584; CHECK-NEXT:    vmv1r.v v10, v8
585; CHECK-NEXT:    vmv1r.v v11, v8
586; CHECK-NEXT:    vmv1r.v v12, v8
587; CHECK-NEXT:    vmv1r.v v13, v8
588; CHECK-NEXT:    vmv1r.v v14, v8
589; CHECK-NEXT:    vmv1r.v v15, v8
590; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
591; CHECK-NEXT:    vsseg8e32.v v8, (a0)
592; CHECK-NEXT:    ret
593entry:
594  tail call void @llvm.riscv.vsseg8.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %vl)
595  ret void
596}
597
598define void @test_vsseg8_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl) {
599; CHECK-LABEL: test_vsseg8_mask_nxv2i32:
600; CHECK:       # %bb.0: # %entry
601; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
602; CHECK-NEXT:    vmv1r.v v9, v8
603; CHECK-NEXT:    vmv1r.v v10, v8
604; CHECK-NEXT:    vmv1r.v v11, v8
605; CHECK-NEXT:    vmv1r.v v12, v8
606; CHECK-NEXT:    vmv1r.v v13, v8
607; CHECK-NEXT:    vmv1r.v v14, v8
608; CHECK-NEXT:    vmv1r.v v15, v8
609; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
610; CHECK-NEXT:    vsseg8e32.v v8, (a0), v0.t
611; CHECK-NEXT:    ret
612entry:
613  tail call void @llvm.riscv.vsseg8.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl)
614  ret void
615}
616
617declare void @llvm.riscv.vsseg2.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16* , i32)
618declare void @llvm.riscv.vsseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32)
619
620define void @test_vsseg2_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl) {
621; CHECK-LABEL: test_vsseg2_nxv4i16:
622; CHECK:       # %bb.0: # %entry
623; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
624; CHECK-NEXT:    vmv1r.v v9, v8
625; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
626; CHECK-NEXT:    vsseg2e16.v v8, (a0)
627; CHECK-NEXT:    ret
628entry:
629  tail call void @llvm.riscv.vsseg2.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %vl)
630  ret void
631}
632
633define void @test_vsseg2_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl) {
634; CHECK-LABEL: test_vsseg2_mask_nxv4i16:
635; CHECK:       # %bb.0: # %entry
636; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
637; CHECK-NEXT:    vmv1r.v v9, v8
638; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
639; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
640; CHECK-NEXT:    ret
641entry:
642  tail call void @llvm.riscv.vsseg2.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl)
643  ret void
644}
645
646declare void @llvm.riscv.vsseg3.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16* , i32)
647declare void @llvm.riscv.vsseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32)
648
649define void @test_vsseg3_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl) {
650; CHECK-LABEL: test_vsseg3_nxv4i16:
651; CHECK:       # %bb.0: # %entry
652; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
653; CHECK-NEXT:    vmv1r.v v9, v8
654; CHECK-NEXT:    vmv1r.v v10, v8
655; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
656; CHECK-NEXT:    vsseg3e16.v v8, (a0)
657; CHECK-NEXT:    ret
658entry:
659  tail call void @llvm.riscv.vsseg3.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %vl)
660  ret void
661}
662
663define void @test_vsseg3_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl) {
664; CHECK-LABEL: test_vsseg3_mask_nxv4i16:
665; CHECK:       # %bb.0: # %entry
666; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
667; CHECK-NEXT:    vmv1r.v v9, v8
668; CHECK-NEXT:    vmv1r.v v10, v8
669; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
670; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
671; CHECK-NEXT:    ret
672entry:
673  tail call void @llvm.riscv.vsseg3.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl)
674  ret void
675}
676
677declare void @llvm.riscv.vsseg4.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16* , i32)
678declare void @llvm.riscv.vsseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32)
679
680define void @test_vsseg4_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl) {
681; CHECK-LABEL: test_vsseg4_nxv4i16:
682; CHECK:       # %bb.0: # %entry
683; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
684; CHECK-NEXT:    vmv1r.v v9, v8
685; CHECK-NEXT:    vmv1r.v v10, v8
686; CHECK-NEXT:    vmv1r.v v11, v8
687; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
688; CHECK-NEXT:    vsseg4e16.v v8, (a0)
689; CHECK-NEXT:    ret
690entry:
691  tail call void @llvm.riscv.vsseg4.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %vl)
692  ret void
693}
694
695define void @test_vsseg4_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl) {
696; CHECK-LABEL: test_vsseg4_mask_nxv4i16:
697; CHECK:       # %bb.0: # %entry
698; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
699; CHECK-NEXT:    vmv1r.v v9, v8
700; CHECK-NEXT:    vmv1r.v v10, v8
701; CHECK-NEXT:    vmv1r.v v11, v8
702; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
703; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
704; CHECK-NEXT:    ret
705entry:
706  tail call void @llvm.riscv.vsseg4.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl)
707  ret void
708}
709
710declare void @llvm.riscv.vsseg5.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16* , i32)
711declare void @llvm.riscv.vsseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32)
712
713define void @test_vsseg5_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl) {
714; CHECK-LABEL: test_vsseg5_nxv4i16:
715; CHECK:       # %bb.0: # %entry
716; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
717; CHECK-NEXT:    vmv1r.v v9, v8
718; CHECK-NEXT:    vmv1r.v v10, v8
719; CHECK-NEXT:    vmv1r.v v11, v8
720; CHECK-NEXT:    vmv1r.v v12, v8
721; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
722; CHECK-NEXT:    vsseg5e16.v v8, (a0)
723; CHECK-NEXT:    ret
724entry:
725  tail call void @llvm.riscv.vsseg5.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %vl)
726  ret void
727}
728
729define void @test_vsseg5_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl) {
730; CHECK-LABEL: test_vsseg5_mask_nxv4i16:
731; CHECK:       # %bb.0: # %entry
732; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
733; CHECK-NEXT:    vmv1r.v v9, v8
734; CHECK-NEXT:    vmv1r.v v10, v8
735; CHECK-NEXT:    vmv1r.v v11, v8
736; CHECK-NEXT:    vmv1r.v v12, v8
737; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
738; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
739; CHECK-NEXT:    ret
740entry:
741  tail call void @llvm.riscv.vsseg5.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl)
742  ret void
743}
744
745declare void @llvm.riscv.vsseg6.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16* , i32)
746declare void @llvm.riscv.vsseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32)
747
748define void @test_vsseg6_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl) {
749; CHECK-LABEL: test_vsseg6_nxv4i16:
750; CHECK:       # %bb.0: # %entry
751; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
752; CHECK-NEXT:    vmv1r.v v9, v8
753; CHECK-NEXT:    vmv1r.v v10, v8
754; CHECK-NEXT:    vmv1r.v v11, v8
755; CHECK-NEXT:    vmv1r.v v12, v8
756; CHECK-NEXT:    vmv1r.v v13, v8
757; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
758; CHECK-NEXT:    vsseg6e16.v v8, (a0)
759; CHECK-NEXT:    ret
760entry:
761  tail call void @llvm.riscv.vsseg6.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %vl)
762  ret void
763}
764
765define void @test_vsseg6_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl) {
766; CHECK-LABEL: test_vsseg6_mask_nxv4i16:
767; CHECK:       # %bb.0: # %entry
768; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
769; CHECK-NEXT:    vmv1r.v v9, v8
770; CHECK-NEXT:    vmv1r.v v10, v8
771; CHECK-NEXT:    vmv1r.v v11, v8
772; CHECK-NEXT:    vmv1r.v v12, v8
773; CHECK-NEXT:    vmv1r.v v13, v8
774; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
775; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
776; CHECK-NEXT:    ret
777entry:
778  tail call void @llvm.riscv.vsseg6.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl)
779  ret void
780}
781
782declare void @llvm.riscv.vsseg7.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16* , i32)
783declare void @llvm.riscv.vsseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32)
784
785define void @test_vsseg7_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl) {
786; CHECK-LABEL: test_vsseg7_nxv4i16:
787; CHECK:       # %bb.0: # %entry
788; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
789; CHECK-NEXT:    vmv1r.v v9, v8
790; CHECK-NEXT:    vmv1r.v v10, v8
791; CHECK-NEXT:    vmv1r.v v11, v8
792; CHECK-NEXT:    vmv1r.v v12, v8
793; CHECK-NEXT:    vmv1r.v v13, v8
794; CHECK-NEXT:    vmv1r.v v14, v8
795; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
796; CHECK-NEXT:    vsseg7e16.v v8, (a0)
797; CHECK-NEXT:    ret
798entry:
799  tail call void @llvm.riscv.vsseg7.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %vl)
800  ret void
801}
802
803define void @test_vsseg7_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl) {
804; CHECK-LABEL: test_vsseg7_mask_nxv4i16:
805; CHECK:       # %bb.0: # %entry
806; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
807; CHECK-NEXT:    vmv1r.v v9, v8
808; CHECK-NEXT:    vmv1r.v v10, v8
809; CHECK-NEXT:    vmv1r.v v11, v8
810; CHECK-NEXT:    vmv1r.v v12, v8
811; CHECK-NEXT:    vmv1r.v v13, v8
812; CHECK-NEXT:    vmv1r.v v14, v8
813; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
814; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
815; CHECK-NEXT:    ret
816entry:
817  tail call void @llvm.riscv.vsseg7.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl)
818  ret void
819}
820
821declare void @llvm.riscv.vsseg8.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16* , i32)
822declare void @llvm.riscv.vsseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32)
823
824define void @test_vsseg8_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl) {
825; CHECK-LABEL: test_vsseg8_nxv4i16:
826; CHECK:       # %bb.0: # %entry
827; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
828; CHECK-NEXT:    vmv1r.v v9, v8
829; CHECK-NEXT:    vmv1r.v v10, v8
830; CHECK-NEXT:    vmv1r.v v11, v8
831; CHECK-NEXT:    vmv1r.v v12, v8
832; CHECK-NEXT:    vmv1r.v v13, v8
833; CHECK-NEXT:    vmv1r.v v14, v8
834; CHECK-NEXT:    vmv1r.v v15, v8
835; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
836; CHECK-NEXT:    vsseg8e16.v v8, (a0)
837; CHECK-NEXT:    ret
838entry:
839  tail call void @llvm.riscv.vsseg8.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %vl)
840  ret void
841}
842
843define void @test_vsseg8_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl) {
844; CHECK-LABEL: test_vsseg8_mask_nxv4i16:
845; CHECK:       # %bb.0: # %entry
846; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
847; CHECK-NEXT:    vmv1r.v v9, v8
848; CHECK-NEXT:    vmv1r.v v10, v8
849; CHECK-NEXT:    vmv1r.v v11, v8
850; CHECK-NEXT:    vmv1r.v v12, v8
851; CHECK-NEXT:    vmv1r.v v13, v8
852; CHECK-NEXT:    vmv1r.v v14, v8
853; CHECK-NEXT:    vmv1r.v v15, v8
854; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
855; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
856; CHECK-NEXT:    ret
857entry:
858  tail call void @llvm.riscv.vsseg8.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl)
859  ret void
860}
861
862declare void @llvm.riscv.vsseg2.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32* , i32)
863declare void @llvm.riscv.vsseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32)
864
865define void @test_vsseg2_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %vl) {
866; CHECK-LABEL: test_vsseg2_nxv1i32:
867; CHECK:       # %bb.0: # %entry
868; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
869; CHECK-NEXT:    vmv1r.v v9, v8
870; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
871; CHECK-NEXT:    vsseg2e32.v v8, (a0)
872; CHECK-NEXT:    ret
873entry:
874  tail call void @llvm.riscv.vsseg2.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %vl)
875  ret void
876}
877
878define void @test_vsseg2_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl) {
879; CHECK-LABEL: test_vsseg2_mask_nxv1i32:
880; CHECK:       # %bb.0: # %entry
881; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
882; CHECK-NEXT:    vmv1r.v v9, v8
883; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
884; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
885; CHECK-NEXT:    ret
886entry:
887  tail call void @llvm.riscv.vsseg2.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl)
888  ret void
889}
890
891declare void @llvm.riscv.vsseg3.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32* , i32)
892declare void @llvm.riscv.vsseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32)
893
894define void @test_vsseg3_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %vl) {
895; CHECK-LABEL: test_vsseg3_nxv1i32:
896; CHECK:       # %bb.0: # %entry
897; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
898; CHECK-NEXT:    vmv1r.v v9, v8
899; CHECK-NEXT:    vmv1r.v v10, v8
900; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
901; CHECK-NEXT:    vsseg3e32.v v8, (a0)
902; CHECK-NEXT:    ret
903entry:
904  tail call void @llvm.riscv.vsseg3.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %vl)
905  ret void
906}
907
908define void @test_vsseg3_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl) {
909; CHECK-LABEL: test_vsseg3_mask_nxv1i32:
910; CHECK:       # %bb.0: # %entry
911; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
912; CHECK-NEXT:    vmv1r.v v9, v8
913; CHECK-NEXT:    vmv1r.v v10, v8
914; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
915; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
916; CHECK-NEXT:    ret
917entry:
918  tail call void @llvm.riscv.vsseg3.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl)
919  ret void
920}
921
922declare void @llvm.riscv.vsseg4.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32* , i32)
923declare void @llvm.riscv.vsseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32)
924
925define void @test_vsseg4_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %vl) {
926; CHECK-LABEL: test_vsseg4_nxv1i32:
927; CHECK:       # %bb.0: # %entry
928; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
929; CHECK-NEXT:    vmv1r.v v9, v8
930; CHECK-NEXT:    vmv1r.v v10, v8
931; CHECK-NEXT:    vmv1r.v v11, v8
932; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
933; CHECK-NEXT:    vsseg4e32.v v8, (a0)
934; CHECK-NEXT:    ret
935entry:
936  tail call void @llvm.riscv.vsseg4.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %vl)
937  ret void
938}
939
940define void @test_vsseg4_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl) {
941; CHECK-LABEL: test_vsseg4_mask_nxv1i32:
942; CHECK:       # %bb.0: # %entry
943; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
944; CHECK-NEXT:    vmv1r.v v9, v8
945; CHECK-NEXT:    vmv1r.v v10, v8
946; CHECK-NEXT:    vmv1r.v v11, v8
947; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
948; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
949; CHECK-NEXT:    ret
950entry:
951  tail call void @llvm.riscv.vsseg4.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl)
952  ret void
953}
954
955declare void @llvm.riscv.vsseg5.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32* , i32)
956declare void @llvm.riscv.vsseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32)
957
958define void @test_vsseg5_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %vl) {
959; CHECK-LABEL: test_vsseg5_nxv1i32:
960; CHECK:       # %bb.0: # %entry
961; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
962; CHECK-NEXT:    vmv1r.v v9, v8
963; CHECK-NEXT:    vmv1r.v v10, v8
964; CHECK-NEXT:    vmv1r.v v11, v8
965; CHECK-NEXT:    vmv1r.v v12, v8
966; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
967; CHECK-NEXT:    vsseg5e32.v v8, (a0)
968; CHECK-NEXT:    ret
969entry:
970  tail call void @llvm.riscv.vsseg5.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %vl)
971  ret void
972}
973
974define void @test_vsseg5_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl) {
975; CHECK-LABEL: test_vsseg5_mask_nxv1i32:
976; CHECK:       # %bb.0: # %entry
977; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
978; CHECK-NEXT:    vmv1r.v v9, v8
979; CHECK-NEXT:    vmv1r.v v10, v8
980; CHECK-NEXT:    vmv1r.v v11, v8
981; CHECK-NEXT:    vmv1r.v v12, v8
982; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
983; CHECK-NEXT:    vsseg5e32.v v8, (a0), v0.t
984; CHECK-NEXT:    ret
985entry:
986  tail call void @llvm.riscv.vsseg5.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl)
987  ret void
988}
989
990declare void @llvm.riscv.vsseg6.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32* , i32)
991declare void @llvm.riscv.vsseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32)
992
993define void @test_vsseg6_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %vl) {
994; CHECK-LABEL: test_vsseg6_nxv1i32:
995; CHECK:       # %bb.0: # %entry
996; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
997; CHECK-NEXT:    vmv1r.v v9, v8
998; CHECK-NEXT:    vmv1r.v v10, v8
999; CHECK-NEXT:    vmv1r.v v11, v8
1000; CHECK-NEXT:    vmv1r.v v12, v8
1001; CHECK-NEXT:    vmv1r.v v13, v8
1002; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
1003; CHECK-NEXT:    vsseg6e32.v v8, (a0)
1004; CHECK-NEXT:    ret
1005entry:
1006  tail call void @llvm.riscv.vsseg6.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %vl)
1007  ret void
1008}
1009
1010define void @test_vsseg6_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl) {
1011; CHECK-LABEL: test_vsseg6_mask_nxv1i32:
1012; CHECK:       # %bb.0: # %entry
1013; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
1014; CHECK-NEXT:    vmv1r.v v9, v8
1015; CHECK-NEXT:    vmv1r.v v10, v8
1016; CHECK-NEXT:    vmv1r.v v11, v8
1017; CHECK-NEXT:    vmv1r.v v12, v8
1018; CHECK-NEXT:    vmv1r.v v13, v8
1019; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
1020; CHECK-NEXT:    vsseg6e32.v v8, (a0), v0.t
1021; CHECK-NEXT:    ret
1022entry:
1023  tail call void @llvm.riscv.vsseg6.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl)
1024  ret void
1025}
1026
1027declare void @llvm.riscv.vsseg7.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32* , i32)
1028declare void @llvm.riscv.vsseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32)
1029
1030define void @test_vsseg7_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %vl) {
1031; CHECK-LABEL: test_vsseg7_nxv1i32:
1032; CHECK:       # %bb.0: # %entry
1033; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
1034; CHECK-NEXT:    vmv1r.v v9, v8
1035; CHECK-NEXT:    vmv1r.v v10, v8
1036; CHECK-NEXT:    vmv1r.v v11, v8
1037; CHECK-NEXT:    vmv1r.v v12, v8
1038; CHECK-NEXT:    vmv1r.v v13, v8
1039; CHECK-NEXT:    vmv1r.v v14, v8
1040; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
1041; CHECK-NEXT:    vsseg7e32.v v8, (a0)
1042; CHECK-NEXT:    ret
1043entry:
1044  tail call void @llvm.riscv.vsseg7.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %vl)
1045  ret void
1046}
1047
1048define void @test_vsseg7_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl) {
1049; CHECK-LABEL: test_vsseg7_mask_nxv1i32:
1050; CHECK:       # %bb.0: # %entry
1051; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
1052; CHECK-NEXT:    vmv1r.v v9, v8
1053; CHECK-NEXT:    vmv1r.v v10, v8
1054; CHECK-NEXT:    vmv1r.v v11, v8
1055; CHECK-NEXT:    vmv1r.v v12, v8
1056; CHECK-NEXT:    vmv1r.v v13, v8
1057; CHECK-NEXT:    vmv1r.v v14, v8
1058; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
1059; CHECK-NEXT:    vsseg7e32.v v8, (a0), v0.t
1060; CHECK-NEXT:    ret
1061entry:
1062  tail call void @llvm.riscv.vsseg7.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl)
1063  ret void
1064}
1065
1066declare void @llvm.riscv.vsseg8.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32* , i32)
1067declare void @llvm.riscv.vsseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32)
1068
1069define void @test_vsseg8_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %vl) {
1070; CHECK-LABEL: test_vsseg8_nxv1i32:
1071; CHECK:       # %bb.0: # %entry
1072; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
1073; CHECK-NEXT:    vmv1r.v v9, v8
1074; CHECK-NEXT:    vmv1r.v v10, v8
1075; CHECK-NEXT:    vmv1r.v v11, v8
1076; CHECK-NEXT:    vmv1r.v v12, v8
1077; CHECK-NEXT:    vmv1r.v v13, v8
1078; CHECK-NEXT:    vmv1r.v v14, v8
1079; CHECK-NEXT:    vmv1r.v v15, v8
1080; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
1081; CHECK-NEXT:    vsseg8e32.v v8, (a0)
1082; CHECK-NEXT:    ret
1083entry:
1084  tail call void @llvm.riscv.vsseg8.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %vl)
1085  ret void
1086}
1087
1088define void @test_vsseg8_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl) {
1089; CHECK-LABEL: test_vsseg8_mask_nxv1i32:
1090; CHECK:       # %bb.0: # %entry
1091; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
1092; CHECK-NEXT:    vmv1r.v v9, v8
1093; CHECK-NEXT:    vmv1r.v v10, v8
1094; CHECK-NEXT:    vmv1r.v v11, v8
1095; CHECK-NEXT:    vmv1r.v v12, v8
1096; CHECK-NEXT:    vmv1r.v v13, v8
1097; CHECK-NEXT:    vmv1r.v v14, v8
1098; CHECK-NEXT:    vmv1r.v v15, v8
1099; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
1100; CHECK-NEXT:    vsseg8e32.v v8, (a0), v0.t
1101; CHECK-NEXT:    ret
1102entry:
1103  tail call void @llvm.riscv.vsseg8.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl)
1104  ret void
1105}
1106
1107declare void @llvm.riscv.vsseg2.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16* , i32)
1108declare void @llvm.riscv.vsseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i32)
1109
1110define void @test_vsseg2_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %vl) {
1111; CHECK-LABEL: test_vsseg2_nxv8i16:
1112; CHECK:       # %bb.0: # %entry
1113; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
1114; CHECK-NEXT:    vmv2r.v v10, v8
1115; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
1116; CHECK-NEXT:    vsseg2e16.v v8, (a0)
1117; CHECK-NEXT:    ret
1118entry:
1119  tail call void @llvm.riscv.vsseg2.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i32 %vl)
1120  ret void
1121}
1122
1123define void @test_vsseg2_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl) {
1124; CHECK-LABEL: test_vsseg2_mask_nxv8i16:
1125; CHECK:       # %bb.0: # %entry
1126; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
1127; CHECK-NEXT:    vmv2r.v v10, v8
1128; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
1129; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
1130; CHECK-NEXT:    ret
1131entry:
1132  tail call void @llvm.riscv.vsseg2.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl)
1133  ret void
1134}
1135
1136declare void @llvm.riscv.vsseg3.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16* , i32)
1137declare void @llvm.riscv.vsseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i32)
1138
1139define void @test_vsseg3_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %vl) {
1140; CHECK-LABEL: test_vsseg3_nxv8i16:
1141; CHECK:       # %bb.0: # %entry
1142; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
1143; CHECK-NEXT:    vmv2r.v v10, v8
1144; CHECK-NEXT:    vmv2r.v v12, v8
1145; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
1146; CHECK-NEXT:    vsseg3e16.v v8, (a0)
1147; CHECK-NEXT:    ret
1148entry:
1149  tail call void @llvm.riscv.vsseg3.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i32 %vl)
1150  ret void
1151}
1152
1153define void @test_vsseg3_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl) {
1154; CHECK-LABEL: test_vsseg3_mask_nxv8i16:
1155; CHECK:       # %bb.0: # %entry
1156; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
1157; CHECK-NEXT:    vmv2r.v v10, v8
1158; CHECK-NEXT:    vmv2r.v v12, v8
1159; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
1160; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
1161; CHECK-NEXT:    ret
1162entry:
1163  tail call void @llvm.riscv.vsseg3.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl)
1164  ret void
1165}
1166
1167declare void @llvm.riscv.vsseg4.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16* , i32)
1168declare void @llvm.riscv.vsseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i32)
1169
1170define void @test_vsseg4_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %vl) {
1171; CHECK-LABEL: test_vsseg4_nxv8i16:
1172; CHECK:       # %bb.0: # %entry
1173; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
1174; CHECK-NEXT:    vmv2r.v v10, v8
1175; CHECK-NEXT:    vmv2r.v v12, v8
1176; CHECK-NEXT:    vmv2r.v v14, v8
1177; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
1178; CHECK-NEXT:    vsseg4e16.v v8, (a0)
1179; CHECK-NEXT:    ret
1180entry:
1181  tail call void @llvm.riscv.vsseg4.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i32 %vl)
1182  ret void
1183}
1184
1185define void @test_vsseg4_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl) {
1186; CHECK-LABEL: test_vsseg4_mask_nxv8i16:
1187; CHECK:       # %bb.0: # %entry
1188; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
1189; CHECK-NEXT:    vmv2r.v v10, v8
1190; CHECK-NEXT:    vmv2r.v v12, v8
1191; CHECK-NEXT:    vmv2r.v v14, v8
1192; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
1193; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
1194; CHECK-NEXT:    ret
1195entry:
1196  tail call void @llvm.riscv.vsseg4.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl)
1197  ret void
1198}
1199
1200declare void @llvm.riscv.vsseg2.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8* , i32)
1201declare void @llvm.riscv.vsseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32)
1202
1203define void @test_vsseg2_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl) {
1204; CHECK-LABEL: test_vsseg2_nxv8i8:
1205; CHECK:       # %bb.0: # %entry
1206; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
1207; CHECK-NEXT:    vmv1r.v v9, v8
1208; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
1209; CHECK-NEXT:    vsseg2e8.v v8, (a0)
1210; CHECK-NEXT:    ret
1211entry:
1212  tail call void @llvm.riscv.vsseg2.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %vl)
1213  ret void
1214}
1215
1216define void @test_vsseg2_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl) {
1217; CHECK-LABEL: test_vsseg2_mask_nxv8i8:
1218; CHECK:       # %bb.0: # %entry
1219; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
1220; CHECK-NEXT:    vmv1r.v v9, v8
1221; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
1222; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
1223; CHECK-NEXT:    ret
1224entry:
1225  tail call void @llvm.riscv.vsseg2.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl)
1226  ret void
1227}
1228
1229declare void @llvm.riscv.vsseg3.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8* , i32)
1230declare void @llvm.riscv.vsseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32)
1231
1232define void @test_vsseg3_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl) {
1233; CHECK-LABEL: test_vsseg3_nxv8i8:
1234; CHECK:       # %bb.0: # %entry
1235; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
1236; CHECK-NEXT:    vmv1r.v v9, v8
1237; CHECK-NEXT:    vmv1r.v v10, v8
1238; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
1239; CHECK-NEXT:    vsseg3e8.v v8, (a0)
1240; CHECK-NEXT:    ret
1241entry:
1242  tail call void @llvm.riscv.vsseg3.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %vl)
1243  ret void
1244}
1245
1246define void @test_vsseg3_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl) {
1247; CHECK-LABEL: test_vsseg3_mask_nxv8i8:
1248; CHECK:       # %bb.0: # %entry
1249; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
1250; CHECK-NEXT:    vmv1r.v v9, v8
1251; CHECK-NEXT:    vmv1r.v v10, v8
1252; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
1253; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
1254; CHECK-NEXT:    ret
1255entry:
1256  tail call void @llvm.riscv.vsseg3.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl)
1257  ret void
1258}
1259
1260declare void @llvm.riscv.vsseg4.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8* , i32)
1261declare void @llvm.riscv.vsseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32)
1262
1263define void @test_vsseg4_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl) {
1264; CHECK-LABEL: test_vsseg4_nxv8i8:
1265; CHECK:       # %bb.0: # %entry
1266; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
1267; CHECK-NEXT:    vmv1r.v v9, v8
1268; CHECK-NEXT:    vmv1r.v v10, v8
1269; CHECK-NEXT:    vmv1r.v v11, v8
1270; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
1271; CHECK-NEXT:    vsseg4e8.v v8, (a0)
1272; CHECK-NEXT:    ret
1273entry:
1274  tail call void @llvm.riscv.vsseg4.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %vl)
1275  ret void
1276}
1277
1278define void @test_vsseg4_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl) {
1279; CHECK-LABEL: test_vsseg4_mask_nxv8i8:
1280; CHECK:       # %bb.0: # %entry
1281; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
1282; CHECK-NEXT:    vmv1r.v v9, v8
1283; CHECK-NEXT:    vmv1r.v v10, v8
1284; CHECK-NEXT:    vmv1r.v v11, v8
1285; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
1286; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
1287; CHECK-NEXT:    ret
1288entry:
1289  tail call void @llvm.riscv.vsseg4.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl)
1290  ret void
1291}
1292
1293declare void @llvm.riscv.vsseg5.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8* , i32)
1294declare void @llvm.riscv.vsseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32)
1295
1296define void @test_vsseg5_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl) {
1297; CHECK-LABEL: test_vsseg5_nxv8i8:
1298; CHECK:       # %bb.0: # %entry
1299; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
1300; CHECK-NEXT:    vmv1r.v v9, v8
1301; CHECK-NEXT:    vmv1r.v v10, v8
1302; CHECK-NEXT:    vmv1r.v v11, v8
1303; CHECK-NEXT:    vmv1r.v v12, v8
1304; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
1305; CHECK-NEXT:    vsseg5e8.v v8, (a0)
1306; CHECK-NEXT:    ret
1307entry:
1308  tail call void @llvm.riscv.vsseg5.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %vl)
1309  ret void
1310}
1311
1312define void @test_vsseg5_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl) {
1313; CHECK-LABEL: test_vsseg5_mask_nxv8i8:
1314; CHECK:       # %bb.0: # %entry
1315; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
1316; CHECK-NEXT:    vmv1r.v v9, v8
1317; CHECK-NEXT:    vmv1r.v v10, v8
1318; CHECK-NEXT:    vmv1r.v v11, v8
1319; CHECK-NEXT:    vmv1r.v v12, v8
1320; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
1321; CHECK-NEXT:    vsseg5e8.v v8, (a0), v0.t
1322; CHECK-NEXT:    ret
1323entry:
1324  tail call void @llvm.riscv.vsseg5.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl)
1325  ret void
1326}
1327
1328declare void @llvm.riscv.vsseg6.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8* , i32)
1329declare void @llvm.riscv.vsseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32)
1330
1331define void @test_vsseg6_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl) {
1332; CHECK-LABEL: test_vsseg6_nxv8i8:
1333; CHECK:       # %bb.0: # %entry
1334; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
1335; CHECK-NEXT:    vmv1r.v v9, v8
1336; CHECK-NEXT:    vmv1r.v v10, v8
1337; CHECK-NEXT:    vmv1r.v v11, v8
1338; CHECK-NEXT:    vmv1r.v v12, v8
1339; CHECK-NEXT:    vmv1r.v v13, v8
1340; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
1341; CHECK-NEXT:    vsseg6e8.v v8, (a0)
1342; CHECK-NEXT:    ret
1343entry:
1344  tail call void @llvm.riscv.vsseg6.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %vl)
1345  ret void
1346}
1347
1348define void @test_vsseg6_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl) {
1349; CHECK-LABEL: test_vsseg6_mask_nxv8i8:
1350; CHECK:       # %bb.0: # %entry
1351; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
1352; CHECK-NEXT:    vmv1r.v v9, v8
1353; CHECK-NEXT:    vmv1r.v v10, v8
1354; CHECK-NEXT:    vmv1r.v v11, v8
1355; CHECK-NEXT:    vmv1r.v v12, v8
1356; CHECK-NEXT:    vmv1r.v v13, v8
1357; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
1358; CHECK-NEXT:    vsseg6e8.v v8, (a0), v0.t
1359; CHECK-NEXT:    ret
1360entry:
1361  tail call void @llvm.riscv.vsseg6.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl)
1362  ret void
1363}
1364
1365declare void @llvm.riscv.vsseg7.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8* , i32)
1366declare void @llvm.riscv.vsseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32)
1367
1368define void @test_vsseg7_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl) {
1369; CHECK-LABEL: test_vsseg7_nxv8i8:
1370; CHECK:       # %bb.0: # %entry
1371; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
1372; CHECK-NEXT:    vmv1r.v v9, v8
1373; CHECK-NEXT:    vmv1r.v v10, v8
1374; CHECK-NEXT:    vmv1r.v v11, v8
1375; CHECK-NEXT:    vmv1r.v v12, v8
1376; CHECK-NEXT:    vmv1r.v v13, v8
1377; CHECK-NEXT:    vmv1r.v v14, v8
1378; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
1379; CHECK-NEXT:    vsseg7e8.v v8, (a0)
1380; CHECK-NEXT:    ret
1381entry:
1382  tail call void @llvm.riscv.vsseg7.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %vl)
1383  ret void
1384}
1385
1386define void @test_vsseg7_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl) {
1387; CHECK-LABEL: test_vsseg7_mask_nxv8i8:
1388; CHECK:       # %bb.0: # %entry
1389; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
1390; CHECK-NEXT:    vmv1r.v v9, v8
1391; CHECK-NEXT:    vmv1r.v v10, v8
1392; CHECK-NEXT:    vmv1r.v v11, v8
1393; CHECK-NEXT:    vmv1r.v v12, v8
1394; CHECK-NEXT:    vmv1r.v v13, v8
1395; CHECK-NEXT:    vmv1r.v v14, v8
1396; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
1397; CHECK-NEXT:    vsseg7e8.v v8, (a0), v0.t
1398; CHECK-NEXT:    ret
1399entry:
1400  tail call void @llvm.riscv.vsseg7.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl)
1401  ret void
1402}
1403
1404declare void @llvm.riscv.vsseg8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8* , i32)
1405declare void @llvm.riscv.vsseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32)
1406
1407define void @test_vsseg8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl) {
1408; CHECK-LABEL: test_vsseg8_nxv8i8:
1409; CHECK:       # %bb.0: # %entry
1410; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
1411; CHECK-NEXT:    vmv1r.v v9, v8
1412; CHECK-NEXT:    vmv1r.v v10, v8
1413; CHECK-NEXT:    vmv1r.v v11, v8
1414; CHECK-NEXT:    vmv1r.v v12, v8
1415; CHECK-NEXT:    vmv1r.v v13, v8
1416; CHECK-NEXT:    vmv1r.v v14, v8
1417; CHECK-NEXT:    vmv1r.v v15, v8
1418; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
1419; CHECK-NEXT:    vsseg8e8.v v8, (a0)
1420; CHECK-NEXT:    ret
1421entry:
1422  tail call void @llvm.riscv.vsseg8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %vl)
1423  ret void
1424}
1425
1426define void @test_vsseg8_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl) {
1427; CHECK-LABEL: test_vsseg8_mask_nxv8i8:
1428; CHECK:       # %bb.0: # %entry
1429; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
1430; CHECK-NEXT:    vmv1r.v v9, v8
1431; CHECK-NEXT:    vmv1r.v v10, v8
1432; CHECK-NEXT:    vmv1r.v v11, v8
1433; CHECK-NEXT:    vmv1r.v v12, v8
1434; CHECK-NEXT:    vmv1r.v v13, v8
1435; CHECK-NEXT:    vmv1r.v v14, v8
1436; CHECK-NEXT:    vmv1r.v v15, v8
1437; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
1438; CHECK-NEXT:    vsseg8e8.v v8, (a0), v0.t
1439; CHECK-NEXT:    ret
1440entry:
1441  tail call void @llvm.riscv.vsseg8.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl)
1442  ret void
1443}
1444
1445declare void @llvm.riscv.vsseg2.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32* , i32)
1446declare void @llvm.riscv.vsseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i1>, i32)
1447
1448define void @test_vsseg2_nxv8i32(<vscale x 8 x i32> %val, i32* %base, i32 %vl) {
1449; CHECK-LABEL: test_vsseg2_nxv8i32:
1450; CHECK:       # %bb.0: # %entry
1451; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
1452; CHECK-NEXT:    vmv4r.v v12, v8
1453; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
1454; CHECK-NEXT:    vsseg2e32.v v8, (a0)
1455; CHECK-NEXT:    ret
1456entry:
1457  tail call void @llvm.riscv.vsseg2.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, i32 %vl)
1458  ret void
1459}
1460
1461define void @test_vsseg2_mask_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i1> %mask, i32 %vl) {
1462; CHECK-LABEL: test_vsseg2_mask_nxv8i32:
1463; CHECK:       # %bb.0: # %entry
1464; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
1465; CHECK-NEXT:    vmv4r.v v12, v8
1466; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
1467; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
1468; CHECK-NEXT:    ret
1469entry:
1470  tail call void @llvm.riscv.vsseg2.mask.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i1> %mask, i32 %vl)
1471  ret void
1472}
1473
1474declare void @llvm.riscv.vsseg2.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8* , i32)
1475declare void @llvm.riscv.vsseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32)
1476
1477define void @test_vsseg2_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %vl) {
1478; CHECK-LABEL: test_vsseg2_nxv4i8:
1479; CHECK:       # %bb.0: # %entry
1480; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
1481; CHECK-NEXT:    vmv1r.v v9, v8
1482; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
1483; CHECK-NEXT:    vsseg2e8.v v8, (a0)
1484; CHECK-NEXT:    ret
1485entry:
1486  tail call void @llvm.riscv.vsseg2.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %vl)
1487  ret void
1488}
1489
1490define void @test_vsseg2_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl) {
1491; CHECK-LABEL: test_vsseg2_mask_nxv4i8:
1492; CHECK:       # %bb.0: # %entry
1493; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
1494; CHECK-NEXT:    vmv1r.v v9, v8
1495; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
1496; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
1497; CHECK-NEXT:    ret
1498entry:
1499  tail call void @llvm.riscv.vsseg2.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl)
1500  ret void
1501}
1502
1503declare void @llvm.riscv.vsseg3.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8* , i32)
1504declare void @llvm.riscv.vsseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32)
1505
1506define void @test_vsseg3_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %vl) {
1507; CHECK-LABEL: test_vsseg3_nxv4i8:
1508; CHECK:       # %bb.0: # %entry
1509; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
1510; CHECK-NEXT:    vmv1r.v v9, v8
1511; CHECK-NEXT:    vmv1r.v v10, v8
1512; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
1513; CHECK-NEXT:    vsseg3e8.v v8, (a0)
1514; CHECK-NEXT:    ret
1515entry:
1516  tail call void @llvm.riscv.vsseg3.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %vl)
1517  ret void
1518}
1519
1520define void @test_vsseg3_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl) {
1521; CHECK-LABEL: test_vsseg3_mask_nxv4i8:
1522; CHECK:       # %bb.0: # %entry
1523; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
1524; CHECK-NEXT:    vmv1r.v v9, v8
1525; CHECK-NEXT:    vmv1r.v v10, v8
1526; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
1527; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
1528; CHECK-NEXT:    ret
1529entry:
1530  tail call void @llvm.riscv.vsseg3.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl)
1531  ret void
1532}
1533
1534declare void @llvm.riscv.vsseg4.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8* , i32)
1535declare void @llvm.riscv.vsseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32)
1536
1537define void @test_vsseg4_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %vl) {
1538; CHECK-LABEL: test_vsseg4_nxv4i8:
1539; CHECK:       # %bb.0: # %entry
1540; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
1541; CHECK-NEXT:    vmv1r.v v9, v8
1542; CHECK-NEXT:    vmv1r.v v10, v8
1543; CHECK-NEXT:    vmv1r.v v11, v8
1544; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
1545; CHECK-NEXT:    vsseg4e8.v v8, (a0)
1546; CHECK-NEXT:    ret
1547entry:
1548  tail call void @llvm.riscv.vsseg4.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %vl)
1549  ret void
1550}
1551
1552define void @test_vsseg4_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl) {
1553; CHECK-LABEL: test_vsseg4_mask_nxv4i8:
1554; CHECK:       # %bb.0: # %entry
1555; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
1556; CHECK-NEXT:    vmv1r.v v9, v8
1557; CHECK-NEXT:    vmv1r.v v10, v8
1558; CHECK-NEXT:    vmv1r.v v11, v8
1559; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
1560; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
1561; CHECK-NEXT:    ret
1562entry:
1563  tail call void @llvm.riscv.vsseg4.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl)
1564  ret void
1565}
1566
1567declare void @llvm.riscv.vsseg5.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8* , i32)
1568declare void @llvm.riscv.vsseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32)
1569
1570define void @test_vsseg5_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %vl) {
1571; CHECK-LABEL: test_vsseg5_nxv4i8:
1572; CHECK:       # %bb.0: # %entry
1573; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
1574; CHECK-NEXT:    vmv1r.v v9, v8
1575; CHECK-NEXT:    vmv1r.v v10, v8
1576; CHECK-NEXT:    vmv1r.v v11, v8
1577; CHECK-NEXT:    vmv1r.v v12, v8
1578; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
1579; CHECK-NEXT:    vsseg5e8.v v8, (a0)
1580; CHECK-NEXT:    ret
1581entry:
1582  tail call void @llvm.riscv.vsseg5.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %vl)
1583  ret void
1584}
1585
1586define void @test_vsseg5_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl) {
1587; CHECK-LABEL: test_vsseg5_mask_nxv4i8:
1588; CHECK:       # %bb.0: # %entry
1589; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
1590; CHECK-NEXT:    vmv1r.v v9, v8
1591; CHECK-NEXT:    vmv1r.v v10, v8
1592; CHECK-NEXT:    vmv1r.v v11, v8
1593; CHECK-NEXT:    vmv1r.v v12, v8
1594; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
1595; CHECK-NEXT:    vsseg5e8.v v8, (a0), v0.t
1596; CHECK-NEXT:    ret
1597entry:
1598  tail call void @llvm.riscv.vsseg5.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl)
1599  ret void
1600}
1601
1602declare void @llvm.riscv.vsseg6.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8* , i32)
1603declare void @llvm.riscv.vsseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32)
1604
1605define void @test_vsseg6_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %vl) {
1606; CHECK-LABEL: test_vsseg6_nxv4i8:
1607; CHECK:       # %bb.0: # %entry
1608; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
1609; CHECK-NEXT:    vmv1r.v v9, v8
1610; CHECK-NEXT:    vmv1r.v v10, v8
1611; CHECK-NEXT:    vmv1r.v v11, v8
1612; CHECK-NEXT:    vmv1r.v v12, v8
1613; CHECK-NEXT:    vmv1r.v v13, v8
1614; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
1615; CHECK-NEXT:    vsseg6e8.v v8, (a0)
1616; CHECK-NEXT:    ret
1617entry:
1618  tail call void @llvm.riscv.vsseg6.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %vl)
1619  ret void
1620}
1621
1622define void @test_vsseg6_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl) {
1623; CHECK-LABEL: test_vsseg6_mask_nxv4i8:
1624; CHECK:       # %bb.0: # %entry
1625; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
1626; CHECK-NEXT:    vmv1r.v v9, v8
1627; CHECK-NEXT:    vmv1r.v v10, v8
1628; CHECK-NEXT:    vmv1r.v v11, v8
1629; CHECK-NEXT:    vmv1r.v v12, v8
1630; CHECK-NEXT:    vmv1r.v v13, v8
1631; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
1632; CHECK-NEXT:    vsseg6e8.v v8, (a0), v0.t
1633; CHECK-NEXT:    ret
1634entry:
1635  tail call void @llvm.riscv.vsseg6.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl)
1636  ret void
1637}
1638
1639declare void @llvm.riscv.vsseg7.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8* , i32)
1640declare void @llvm.riscv.vsseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32)
1641
1642define void @test_vsseg7_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %vl) {
1643; CHECK-LABEL: test_vsseg7_nxv4i8:
1644; CHECK:       # %bb.0: # %entry
1645; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
1646; CHECK-NEXT:    vmv1r.v v9, v8
1647; CHECK-NEXT:    vmv1r.v v10, v8
1648; CHECK-NEXT:    vmv1r.v v11, v8
1649; CHECK-NEXT:    vmv1r.v v12, v8
1650; CHECK-NEXT:    vmv1r.v v13, v8
1651; CHECK-NEXT:    vmv1r.v v14, v8
1652; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
1653; CHECK-NEXT:    vsseg7e8.v v8, (a0)
1654; CHECK-NEXT:    ret
1655entry:
1656  tail call void @llvm.riscv.vsseg7.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %vl)
1657  ret void
1658}
1659
1660define void @test_vsseg7_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl) {
1661; CHECK-LABEL: test_vsseg7_mask_nxv4i8:
1662; CHECK:       # %bb.0: # %entry
1663; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
1664; CHECK-NEXT:    vmv1r.v v9, v8
1665; CHECK-NEXT:    vmv1r.v v10, v8
1666; CHECK-NEXT:    vmv1r.v v11, v8
1667; CHECK-NEXT:    vmv1r.v v12, v8
1668; CHECK-NEXT:    vmv1r.v v13, v8
1669; CHECK-NEXT:    vmv1r.v v14, v8
1670; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
1671; CHECK-NEXT:    vsseg7e8.v v8, (a0), v0.t
1672; CHECK-NEXT:    ret
1673entry:
1674  tail call void @llvm.riscv.vsseg7.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl)
1675  ret void
1676}
1677
1678declare void @llvm.riscv.vsseg8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8* , i32)
1679declare void @llvm.riscv.vsseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32)
1680
1681define void @test_vsseg8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %vl) {
1682; CHECK-LABEL: test_vsseg8_nxv4i8:
1683; CHECK:       # %bb.0: # %entry
1684; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
1685; CHECK-NEXT:    vmv1r.v v9, v8
1686; CHECK-NEXT:    vmv1r.v v10, v8
1687; CHECK-NEXT:    vmv1r.v v11, v8
1688; CHECK-NEXT:    vmv1r.v v12, v8
1689; CHECK-NEXT:    vmv1r.v v13, v8
1690; CHECK-NEXT:    vmv1r.v v14, v8
1691; CHECK-NEXT:    vmv1r.v v15, v8
1692; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
1693; CHECK-NEXT:    vsseg8e8.v v8, (a0)
1694; CHECK-NEXT:    ret
1695entry:
1696  tail call void @llvm.riscv.vsseg8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %vl)
1697  ret void
1698}
1699
1700define void @test_vsseg8_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl) {
1701; CHECK-LABEL: test_vsseg8_mask_nxv4i8:
1702; CHECK:       # %bb.0: # %entry
1703; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
1704; CHECK-NEXT:    vmv1r.v v9, v8
1705; CHECK-NEXT:    vmv1r.v v10, v8
1706; CHECK-NEXT:    vmv1r.v v11, v8
1707; CHECK-NEXT:    vmv1r.v v12, v8
1708; CHECK-NEXT:    vmv1r.v v13, v8
1709; CHECK-NEXT:    vmv1r.v v14, v8
1710; CHECK-NEXT:    vmv1r.v v15, v8
1711; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
1712; CHECK-NEXT:    vsseg8e8.v v8, (a0), v0.t
1713; CHECK-NEXT:    ret
1714entry:
1715  tail call void @llvm.riscv.vsseg8.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl)
1716  ret void
1717}
1718
1719declare void @llvm.riscv.vsseg2.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16* , i32)
1720declare void @llvm.riscv.vsseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32)
1721
1722define void @test_vsseg2_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %vl) {
1723; CHECK-LABEL: test_vsseg2_nxv1i16:
1724; CHECK:       # %bb.0: # %entry
1725; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
1726; CHECK-NEXT:    vmv1r.v v9, v8
1727; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
1728; CHECK-NEXT:    vsseg2e16.v v8, (a0)
1729; CHECK-NEXT:    ret
1730entry:
1731  tail call void @llvm.riscv.vsseg2.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %vl)
1732  ret void
1733}
1734
1735define void @test_vsseg2_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl) {
1736; CHECK-LABEL: test_vsseg2_mask_nxv1i16:
1737; CHECK:       # %bb.0: # %entry
1738; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
1739; CHECK-NEXT:    vmv1r.v v9, v8
1740; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
1741; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
1742; CHECK-NEXT:    ret
1743entry:
1744  tail call void @llvm.riscv.vsseg2.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl)
1745  ret void
1746}
1747
1748declare void @llvm.riscv.vsseg3.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16* , i32)
1749declare void @llvm.riscv.vsseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32)
1750
1751define void @test_vsseg3_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %vl) {
1752; CHECK-LABEL: test_vsseg3_nxv1i16:
1753; CHECK:       # %bb.0: # %entry
1754; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
1755; CHECK-NEXT:    vmv1r.v v9, v8
1756; CHECK-NEXT:    vmv1r.v v10, v8
1757; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
1758; CHECK-NEXT:    vsseg3e16.v v8, (a0)
1759; CHECK-NEXT:    ret
1760entry:
1761  tail call void @llvm.riscv.vsseg3.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %vl)
1762  ret void
1763}
1764
1765define void @test_vsseg3_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl) {
1766; CHECK-LABEL: test_vsseg3_mask_nxv1i16:
1767; CHECK:       # %bb.0: # %entry
1768; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
1769; CHECK-NEXT:    vmv1r.v v9, v8
1770; CHECK-NEXT:    vmv1r.v v10, v8
1771; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
1772; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
1773; CHECK-NEXT:    ret
1774entry:
1775  tail call void @llvm.riscv.vsseg3.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl)
1776  ret void
1777}
1778
1779declare void @llvm.riscv.vsseg4.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16* , i32)
1780declare void @llvm.riscv.vsseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32)
1781
1782define void @test_vsseg4_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %vl) {
1783; CHECK-LABEL: test_vsseg4_nxv1i16:
1784; CHECK:       # %bb.0: # %entry
1785; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
1786; CHECK-NEXT:    vmv1r.v v9, v8
1787; CHECK-NEXT:    vmv1r.v v10, v8
1788; CHECK-NEXT:    vmv1r.v v11, v8
1789; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
1790; CHECK-NEXT:    vsseg4e16.v v8, (a0)
1791; CHECK-NEXT:    ret
1792entry:
1793  tail call void @llvm.riscv.vsseg4.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %vl)
1794  ret void
1795}
1796
1797define void @test_vsseg4_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl) {
1798; CHECK-LABEL: test_vsseg4_mask_nxv1i16:
1799; CHECK:       # %bb.0: # %entry
1800; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
1801; CHECK-NEXT:    vmv1r.v v9, v8
1802; CHECK-NEXT:    vmv1r.v v10, v8
1803; CHECK-NEXT:    vmv1r.v v11, v8
1804; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
1805; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
1806; CHECK-NEXT:    ret
1807entry:
1808  tail call void @llvm.riscv.vsseg4.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl)
1809  ret void
1810}
1811
1812declare void @llvm.riscv.vsseg5.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16* , i32)
1813declare void @llvm.riscv.vsseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32)
1814
1815define void @test_vsseg5_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %vl) {
1816; CHECK-LABEL: test_vsseg5_nxv1i16:
1817; CHECK:       # %bb.0: # %entry
1818; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
1819; CHECK-NEXT:    vmv1r.v v9, v8
1820; CHECK-NEXT:    vmv1r.v v10, v8
1821; CHECK-NEXT:    vmv1r.v v11, v8
1822; CHECK-NEXT:    vmv1r.v v12, v8
1823; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
1824; CHECK-NEXT:    vsseg5e16.v v8, (a0)
1825; CHECK-NEXT:    ret
1826entry:
1827  tail call void @llvm.riscv.vsseg5.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %vl)
1828  ret void
1829}
1830
1831define void @test_vsseg5_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl) {
1832; CHECK-LABEL: test_vsseg5_mask_nxv1i16:
1833; CHECK:       # %bb.0: # %entry
1834; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
1835; CHECK-NEXT:    vmv1r.v v9, v8
1836; CHECK-NEXT:    vmv1r.v v10, v8
1837; CHECK-NEXT:    vmv1r.v v11, v8
1838; CHECK-NEXT:    vmv1r.v v12, v8
1839; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
1840; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
1841; CHECK-NEXT:    ret
1842entry:
1843  tail call void @llvm.riscv.vsseg5.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl)
1844  ret void
1845}
1846
1847declare void @llvm.riscv.vsseg6.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16* , i32)
1848declare void @llvm.riscv.vsseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32)
1849
1850define void @test_vsseg6_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %vl) {
1851; CHECK-LABEL: test_vsseg6_nxv1i16:
1852; CHECK:       # %bb.0: # %entry
1853; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
1854; CHECK-NEXT:    vmv1r.v v9, v8
1855; CHECK-NEXT:    vmv1r.v v10, v8
1856; CHECK-NEXT:    vmv1r.v v11, v8
1857; CHECK-NEXT:    vmv1r.v v12, v8
1858; CHECK-NEXT:    vmv1r.v v13, v8
1859; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
1860; CHECK-NEXT:    vsseg6e16.v v8, (a0)
1861; CHECK-NEXT:    ret
1862entry:
1863  tail call void @llvm.riscv.vsseg6.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %vl)
1864  ret void
1865}
1866
1867define void @test_vsseg6_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl) {
1868; CHECK-LABEL: test_vsseg6_mask_nxv1i16:
1869; CHECK:       # %bb.0: # %entry
1870; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
1871; CHECK-NEXT:    vmv1r.v v9, v8
1872; CHECK-NEXT:    vmv1r.v v10, v8
1873; CHECK-NEXT:    vmv1r.v v11, v8
1874; CHECK-NEXT:    vmv1r.v v12, v8
1875; CHECK-NEXT:    vmv1r.v v13, v8
1876; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
1877; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
1878; CHECK-NEXT:    ret
1879entry:
1880  tail call void @llvm.riscv.vsseg6.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl)
1881  ret void
1882}
1883
1884declare void @llvm.riscv.vsseg7.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16* , i32)
1885declare void @llvm.riscv.vsseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32)
1886
1887define void @test_vsseg7_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %vl) {
1888; CHECK-LABEL: test_vsseg7_nxv1i16:
1889; CHECK:       # %bb.0: # %entry
1890; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
1891; CHECK-NEXT:    vmv1r.v v9, v8
1892; CHECK-NEXT:    vmv1r.v v10, v8
1893; CHECK-NEXT:    vmv1r.v v11, v8
1894; CHECK-NEXT:    vmv1r.v v12, v8
1895; CHECK-NEXT:    vmv1r.v v13, v8
1896; CHECK-NEXT:    vmv1r.v v14, v8
1897; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
1898; CHECK-NEXT:    vsseg7e16.v v8, (a0)
1899; CHECK-NEXT:    ret
1900entry:
1901  tail call void @llvm.riscv.vsseg7.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %vl)
1902  ret void
1903}
1904
1905define void @test_vsseg7_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl) {
1906; CHECK-LABEL: test_vsseg7_mask_nxv1i16:
1907; CHECK:       # %bb.0: # %entry
1908; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
1909; CHECK-NEXT:    vmv1r.v v9, v8
1910; CHECK-NEXT:    vmv1r.v v10, v8
1911; CHECK-NEXT:    vmv1r.v v11, v8
1912; CHECK-NEXT:    vmv1r.v v12, v8
1913; CHECK-NEXT:    vmv1r.v v13, v8
1914; CHECK-NEXT:    vmv1r.v v14, v8
1915; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
1916; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
1917; CHECK-NEXT:    ret
1918entry:
1919  tail call void @llvm.riscv.vsseg7.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl)
1920  ret void
1921}
1922
1923declare void @llvm.riscv.vsseg8.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16* , i32)
1924declare void @llvm.riscv.vsseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32)
1925
1926define void @test_vsseg8_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %vl) {
1927; CHECK-LABEL: test_vsseg8_nxv1i16:
1928; CHECK:       # %bb.0: # %entry
1929; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
1930; CHECK-NEXT:    vmv1r.v v9, v8
1931; CHECK-NEXT:    vmv1r.v v10, v8
1932; CHECK-NEXT:    vmv1r.v v11, v8
1933; CHECK-NEXT:    vmv1r.v v12, v8
1934; CHECK-NEXT:    vmv1r.v v13, v8
1935; CHECK-NEXT:    vmv1r.v v14, v8
1936; CHECK-NEXT:    vmv1r.v v15, v8
1937; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
1938; CHECK-NEXT:    vsseg8e16.v v8, (a0)
1939; CHECK-NEXT:    ret
1940entry:
1941  tail call void @llvm.riscv.vsseg8.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %vl)
1942  ret void
1943}
1944
1945define void @test_vsseg8_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl) {
1946; CHECK-LABEL: test_vsseg8_mask_nxv1i16:
1947; CHECK:       # %bb.0: # %entry
1948; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
1949; CHECK-NEXT:    vmv1r.v v9, v8
1950; CHECK-NEXT:    vmv1r.v v10, v8
1951; CHECK-NEXT:    vmv1r.v v11, v8
1952; CHECK-NEXT:    vmv1r.v v12, v8
1953; CHECK-NEXT:    vmv1r.v v13, v8
1954; CHECK-NEXT:    vmv1r.v v14, v8
1955; CHECK-NEXT:    vmv1r.v v15, v8
1956; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
1957; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
1958; CHECK-NEXT:    ret
1959entry:
1960  tail call void @llvm.riscv.vsseg8.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl)
1961  ret void
1962}
1963
1964declare void @llvm.riscv.vsseg2.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8* , i32)
1965declare void @llvm.riscv.vsseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 32 x i1>, i32)
1966
1967define void @test_vsseg2_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i32 %vl) {
1968; CHECK-LABEL: test_vsseg2_nxv32i8:
1969; CHECK:       # %bb.0: # %entry
1970; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
1971; CHECK-NEXT:    vmv4r.v v12, v8
1972; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
1973; CHECK-NEXT:    vsseg2e8.v v8, (a0)
1974; CHECK-NEXT:    ret
1975entry:
1976  tail call void @llvm.riscv.vsseg2.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, i32 %vl)
1977  ret void
1978}
1979
1980define void @test_vsseg2_mask_nxv32i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i32 %vl) {
1981; CHECK-LABEL: test_vsseg2_mask_nxv32i8:
1982; CHECK:       # %bb.0: # %entry
1983; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
1984; CHECK-NEXT:    vmv4r.v v12, v8
1985; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
1986; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
1987; CHECK-NEXT:    ret
1988entry:
1989  tail call void @llvm.riscv.vsseg2.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i32 %vl)
1990  ret void
1991}
1992
1993declare void @llvm.riscv.vsseg2.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8* , i32)
1994declare void @llvm.riscv.vsseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32)
1995
1996define void @test_vsseg2_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %vl) {
1997; CHECK-LABEL: test_vsseg2_nxv2i8:
1998; CHECK:       # %bb.0: # %entry
1999; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
2000; CHECK-NEXT:    vmv1r.v v9, v8
2001; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
2002; CHECK-NEXT:    vsseg2e8.v v8, (a0)
2003; CHECK-NEXT:    ret
2004entry:
2005  tail call void @llvm.riscv.vsseg2.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %vl)
2006  ret void
2007}
2008
2009define void @test_vsseg2_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2010; CHECK-LABEL: test_vsseg2_mask_nxv2i8:
2011; CHECK:       # %bb.0: # %entry
2012; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
2013; CHECK-NEXT:    vmv1r.v v9, v8
2014; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
2015; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
2016; CHECK-NEXT:    ret
2017entry:
2018  tail call void @llvm.riscv.vsseg2.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl)
2019  ret void
2020}
2021
2022declare void @llvm.riscv.vsseg3.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8* , i32)
2023declare void @llvm.riscv.vsseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32)
2024
2025define void @test_vsseg3_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %vl) {
2026; CHECK-LABEL: test_vsseg3_nxv2i8:
2027; CHECK:       # %bb.0: # %entry
2028; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
2029; CHECK-NEXT:    vmv1r.v v9, v8
2030; CHECK-NEXT:    vmv1r.v v10, v8
2031; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
2032; CHECK-NEXT:    vsseg3e8.v v8, (a0)
2033; CHECK-NEXT:    ret
2034entry:
2035  tail call void @llvm.riscv.vsseg3.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %vl)
2036  ret void
2037}
2038
2039define void @test_vsseg3_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2040; CHECK-LABEL: test_vsseg3_mask_nxv2i8:
2041; CHECK:       # %bb.0: # %entry
2042; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
2043; CHECK-NEXT:    vmv1r.v v9, v8
2044; CHECK-NEXT:    vmv1r.v v10, v8
2045; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
2046; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
2047; CHECK-NEXT:    ret
2048entry:
2049  tail call void @llvm.riscv.vsseg3.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl)
2050  ret void
2051}
2052
2053declare void @llvm.riscv.vsseg4.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8* , i32)
2054declare void @llvm.riscv.vsseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32)
2055
2056define void @test_vsseg4_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %vl) {
2057; CHECK-LABEL: test_vsseg4_nxv2i8:
2058; CHECK:       # %bb.0: # %entry
2059; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
2060; CHECK-NEXT:    vmv1r.v v9, v8
2061; CHECK-NEXT:    vmv1r.v v10, v8
2062; CHECK-NEXT:    vmv1r.v v11, v8
2063; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
2064; CHECK-NEXT:    vsseg4e8.v v8, (a0)
2065; CHECK-NEXT:    ret
2066entry:
2067  tail call void @llvm.riscv.vsseg4.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %vl)
2068  ret void
2069}
2070
2071define void @test_vsseg4_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2072; CHECK-LABEL: test_vsseg4_mask_nxv2i8:
2073; CHECK:       # %bb.0: # %entry
2074; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
2075; CHECK-NEXT:    vmv1r.v v9, v8
2076; CHECK-NEXT:    vmv1r.v v10, v8
2077; CHECK-NEXT:    vmv1r.v v11, v8
2078; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
2079; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
2080; CHECK-NEXT:    ret
2081entry:
2082  tail call void @llvm.riscv.vsseg4.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl)
2083  ret void
2084}
2085
2086declare void @llvm.riscv.vsseg5.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8* , i32)
2087declare void @llvm.riscv.vsseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32)
2088
2089define void @test_vsseg5_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %vl) {
2090; CHECK-LABEL: test_vsseg5_nxv2i8:
2091; CHECK:       # %bb.0: # %entry
2092; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
2093; CHECK-NEXT:    vmv1r.v v9, v8
2094; CHECK-NEXT:    vmv1r.v v10, v8
2095; CHECK-NEXT:    vmv1r.v v11, v8
2096; CHECK-NEXT:    vmv1r.v v12, v8
2097; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
2098; CHECK-NEXT:    vsseg5e8.v v8, (a0)
2099; CHECK-NEXT:    ret
2100entry:
2101  tail call void @llvm.riscv.vsseg5.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %vl)
2102  ret void
2103}
2104
2105define void @test_vsseg5_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2106; CHECK-LABEL: test_vsseg5_mask_nxv2i8:
2107; CHECK:       # %bb.0: # %entry
2108; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
2109; CHECK-NEXT:    vmv1r.v v9, v8
2110; CHECK-NEXT:    vmv1r.v v10, v8
2111; CHECK-NEXT:    vmv1r.v v11, v8
2112; CHECK-NEXT:    vmv1r.v v12, v8
2113; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
2114; CHECK-NEXT:    vsseg5e8.v v8, (a0), v0.t
2115; CHECK-NEXT:    ret
2116entry:
2117  tail call void @llvm.riscv.vsseg5.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl)
2118  ret void
2119}
2120
2121declare void @llvm.riscv.vsseg6.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8* , i32)
2122declare void @llvm.riscv.vsseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32)
2123
2124define void @test_vsseg6_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %vl) {
2125; CHECK-LABEL: test_vsseg6_nxv2i8:
2126; CHECK:       # %bb.0: # %entry
2127; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
2128; CHECK-NEXT:    vmv1r.v v9, v8
2129; CHECK-NEXT:    vmv1r.v v10, v8
2130; CHECK-NEXT:    vmv1r.v v11, v8
2131; CHECK-NEXT:    vmv1r.v v12, v8
2132; CHECK-NEXT:    vmv1r.v v13, v8
2133; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
2134; CHECK-NEXT:    vsseg6e8.v v8, (a0)
2135; CHECK-NEXT:    ret
2136entry:
2137  tail call void @llvm.riscv.vsseg6.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %vl)
2138  ret void
2139}
2140
2141define void @test_vsseg6_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2142; CHECK-LABEL: test_vsseg6_mask_nxv2i8:
2143; CHECK:       # %bb.0: # %entry
2144; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
2145; CHECK-NEXT:    vmv1r.v v9, v8
2146; CHECK-NEXT:    vmv1r.v v10, v8
2147; CHECK-NEXT:    vmv1r.v v11, v8
2148; CHECK-NEXT:    vmv1r.v v12, v8
2149; CHECK-NEXT:    vmv1r.v v13, v8
2150; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
2151; CHECK-NEXT:    vsseg6e8.v v8, (a0), v0.t
2152; CHECK-NEXT:    ret
2153entry:
2154  tail call void @llvm.riscv.vsseg6.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl)
2155  ret void
2156}
2157
2158declare void @llvm.riscv.vsseg7.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8* , i32)
2159declare void @llvm.riscv.vsseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32)
2160
2161define void @test_vsseg7_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %vl) {
2162; CHECK-LABEL: test_vsseg7_nxv2i8:
2163; CHECK:       # %bb.0: # %entry
2164; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
2165; CHECK-NEXT:    vmv1r.v v9, v8
2166; CHECK-NEXT:    vmv1r.v v10, v8
2167; CHECK-NEXT:    vmv1r.v v11, v8
2168; CHECK-NEXT:    vmv1r.v v12, v8
2169; CHECK-NEXT:    vmv1r.v v13, v8
2170; CHECK-NEXT:    vmv1r.v v14, v8
2171; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
2172; CHECK-NEXT:    vsseg7e8.v v8, (a0)
2173; CHECK-NEXT:    ret
2174entry:
2175  tail call void @llvm.riscv.vsseg7.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %vl)
2176  ret void
2177}
2178
2179define void @test_vsseg7_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2180; CHECK-LABEL: test_vsseg7_mask_nxv2i8:
2181; CHECK:       # %bb.0: # %entry
2182; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
2183; CHECK-NEXT:    vmv1r.v v9, v8
2184; CHECK-NEXT:    vmv1r.v v10, v8
2185; CHECK-NEXT:    vmv1r.v v11, v8
2186; CHECK-NEXT:    vmv1r.v v12, v8
2187; CHECK-NEXT:    vmv1r.v v13, v8
2188; CHECK-NEXT:    vmv1r.v v14, v8
2189; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
2190; CHECK-NEXT:    vsseg7e8.v v8, (a0), v0.t
2191; CHECK-NEXT:    ret
2192entry:
2193  tail call void @llvm.riscv.vsseg7.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl)
2194  ret void
2195}
2196
2197declare void @llvm.riscv.vsseg8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8* , i32)
2198declare void @llvm.riscv.vsseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32)
2199
2200define void @test_vsseg8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %vl) {
2201; CHECK-LABEL: test_vsseg8_nxv2i8:
2202; CHECK:       # %bb.0: # %entry
2203; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
2204; CHECK-NEXT:    vmv1r.v v9, v8
2205; CHECK-NEXT:    vmv1r.v v10, v8
2206; CHECK-NEXT:    vmv1r.v v11, v8
2207; CHECK-NEXT:    vmv1r.v v12, v8
2208; CHECK-NEXT:    vmv1r.v v13, v8
2209; CHECK-NEXT:    vmv1r.v v14, v8
2210; CHECK-NEXT:    vmv1r.v v15, v8
2211; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
2212; CHECK-NEXT:    vsseg8e8.v v8, (a0)
2213; CHECK-NEXT:    ret
2214entry:
2215  tail call void @llvm.riscv.vsseg8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %vl)
2216  ret void
2217}
2218
2219define void @test_vsseg8_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2220; CHECK-LABEL: test_vsseg8_mask_nxv2i8:
2221; CHECK:       # %bb.0: # %entry
2222; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
2223; CHECK-NEXT:    vmv1r.v v9, v8
2224; CHECK-NEXT:    vmv1r.v v10, v8
2225; CHECK-NEXT:    vmv1r.v v11, v8
2226; CHECK-NEXT:    vmv1r.v v12, v8
2227; CHECK-NEXT:    vmv1r.v v13, v8
2228; CHECK-NEXT:    vmv1r.v v14, v8
2229; CHECK-NEXT:    vmv1r.v v15, v8
2230; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
2231; CHECK-NEXT:    vsseg8e8.v v8, (a0), v0.t
2232; CHECK-NEXT:    ret
2233entry:
2234  tail call void @llvm.riscv.vsseg8.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl)
2235  ret void
2236}
2237
2238declare void @llvm.riscv.vsseg2.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16* , i32)
2239declare void @llvm.riscv.vsseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
2240
2241define void @test_vsseg2_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl) {
2242; CHECK-LABEL: test_vsseg2_nxv2i16:
2243; CHECK:       # %bb.0: # %entry
2244; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
2245; CHECK-NEXT:    vmv1r.v v9, v8
2246; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
2247; CHECK-NEXT:    vsseg2e16.v v8, (a0)
2248; CHECK-NEXT:    ret
2249entry:
2250  tail call void @llvm.riscv.vsseg2.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %vl)
2251  ret void
2252}
2253
2254define void @test_vsseg2_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2255; CHECK-LABEL: test_vsseg2_mask_nxv2i16:
2256; CHECK:       # %bb.0: # %entry
2257; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
2258; CHECK-NEXT:    vmv1r.v v9, v8
2259; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
2260; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
2261; CHECK-NEXT:    ret
2262entry:
2263  tail call void @llvm.riscv.vsseg2.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
2264  ret void
2265}
2266
2267declare void @llvm.riscv.vsseg3.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16* , i32)
2268declare void @llvm.riscv.vsseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
2269
2270define void @test_vsseg3_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl) {
2271; CHECK-LABEL: test_vsseg3_nxv2i16:
2272; CHECK:       # %bb.0: # %entry
2273; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
2274; CHECK-NEXT:    vmv1r.v v9, v8
2275; CHECK-NEXT:    vmv1r.v v10, v8
2276; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
2277; CHECK-NEXT:    vsseg3e16.v v8, (a0)
2278; CHECK-NEXT:    ret
2279entry:
2280  tail call void @llvm.riscv.vsseg3.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %vl)
2281  ret void
2282}
2283
2284define void @test_vsseg3_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2285; CHECK-LABEL: test_vsseg3_mask_nxv2i16:
2286; CHECK:       # %bb.0: # %entry
2287; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
2288; CHECK-NEXT:    vmv1r.v v9, v8
2289; CHECK-NEXT:    vmv1r.v v10, v8
2290; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
2291; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
2292; CHECK-NEXT:    ret
2293entry:
2294  tail call void @llvm.riscv.vsseg3.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
2295  ret void
2296}
2297
2298declare void @llvm.riscv.vsseg4.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16* , i32)
2299declare void @llvm.riscv.vsseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
2300
2301define void @test_vsseg4_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl) {
2302; CHECK-LABEL: test_vsseg4_nxv2i16:
2303; CHECK:       # %bb.0: # %entry
2304; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
2305; CHECK-NEXT:    vmv1r.v v9, v8
2306; CHECK-NEXT:    vmv1r.v v10, v8
2307; CHECK-NEXT:    vmv1r.v v11, v8
2308; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
2309; CHECK-NEXT:    vsseg4e16.v v8, (a0)
2310; CHECK-NEXT:    ret
2311entry:
2312  tail call void @llvm.riscv.vsseg4.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %vl)
2313  ret void
2314}
2315
2316define void @test_vsseg4_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2317; CHECK-LABEL: test_vsseg4_mask_nxv2i16:
2318; CHECK:       # %bb.0: # %entry
2319; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
2320; CHECK-NEXT:    vmv1r.v v9, v8
2321; CHECK-NEXT:    vmv1r.v v10, v8
2322; CHECK-NEXT:    vmv1r.v v11, v8
2323; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
2324; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
2325; CHECK-NEXT:    ret
2326entry:
2327  tail call void @llvm.riscv.vsseg4.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
2328  ret void
2329}
2330
2331declare void @llvm.riscv.vsseg5.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16* , i32)
2332declare void @llvm.riscv.vsseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
2333
2334define void @test_vsseg5_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl) {
2335; CHECK-LABEL: test_vsseg5_nxv2i16:
2336; CHECK:       # %bb.0: # %entry
2337; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
2338; CHECK-NEXT:    vmv1r.v v9, v8
2339; CHECK-NEXT:    vmv1r.v v10, v8
2340; CHECK-NEXT:    vmv1r.v v11, v8
2341; CHECK-NEXT:    vmv1r.v v12, v8
2342; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
2343; CHECK-NEXT:    vsseg5e16.v v8, (a0)
2344; CHECK-NEXT:    ret
2345entry:
2346  tail call void @llvm.riscv.vsseg5.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %vl)
2347  ret void
2348}
2349
2350define void @test_vsseg5_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2351; CHECK-LABEL: test_vsseg5_mask_nxv2i16:
2352; CHECK:       # %bb.0: # %entry
2353; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
2354; CHECK-NEXT:    vmv1r.v v9, v8
2355; CHECK-NEXT:    vmv1r.v v10, v8
2356; CHECK-NEXT:    vmv1r.v v11, v8
2357; CHECK-NEXT:    vmv1r.v v12, v8
2358; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
2359; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
2360; CHECK-NEXT:    ret
2361entry:
2362  tail call void @llvm.riscv.vsseg5.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
2363  ret void
2364}
2365
2366declare void @llvm.riscv.vsseg6.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16* , i32)
2367declare void @llvm.riscv.vsseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
2368
2369define void @test_vsseg6_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl) {
2370; CHECK-LABEL: test_vsseg6_nxv2i16:
2371; CHECK:       # %bb.0: # %entry
2372; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
2373; CHECK-NEXT:    vmv1r.v v9, v8
2374; CHECK-NEXT:    vmv1r.v v10, v8
2375; CHECK-NEXT:    vmv1r.v v11, v8
2376; CHECK-NEXT:    vmv1r.v v12, v8
2377; CHECK-NEXT:    vmv1r.v v13, v8
2378; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
2379; CHECK-NEXT:    vsseg6e16.v v8, (a0)
2380; CHECK-NEXT:    ret
2381entry:
2382  tail call void @llvm.riscv.vsseg6.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %vl)
2383  ret void
2384}
2385
2386define void @test_vsseg6_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2387; CHECK-LABEL: test_vsseg6_mask_nxv2i16:
2388; CHECK:       # %bb.0: # %entry
2389; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
2390; CHECK-NEXT:    vmv1r.v v9, v8
2391; CHECK-NEXT:    vmv1r.v v10, v8
2392; CHECK-NEXT:    vmv1r.v v11, v8
2393; CHECK-NEXT:    vmv1r.v v12, v8
2394; CHECK-NEXT:    vmv1r.v v13, v8
2395; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
2396; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
2397; CHECK-NEXT:    ret
2398entry:
2399  tail call void @llvm.riscv.vsseg6.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
2400  ret void
2401}
2402
2403declare void @llvm.riscv.vsseg7.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16* , i32)
2404declare void @llvm.riscv.vsseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
2405
2406define void @test_vsseg7_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl) {
2407; CHECK-LABEL: test_vsseg7_nxv2i16:
2408; CHECK:       # %bb.0: # %entry
2409; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
2410; CHECK-NEXT:    vmv1r.v v9, v8
2411; CHECK-NEXT:    vmv1r.v v10, v8
2412; CHECK-NEXT:    vmv1r.v v11, v8
2413; CHECK-NEXT:    vmv1r.v v12, v8
2414; CHECK-NEXT:    vmv1r.v v13, v8
2415; CHECK-NEXT:    vmv1r.v v14, v8
2416; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
2417; CHECK-NEXT:    vsseg7e16.v v8, (a0)
2418; CHECK-NEXT:    ret
2419entry:
2420  tail call void @llvm.riscv.vsseg7.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %vl)
2421  ret void
2422}
2423
2424define void @test_vsseg7_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2425; CHECK-LABEL: test_vsseg7_mask_nxv2i16:
2426; CHECK:       # %bb.0: # %entry
2427; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
2428; CHECK-NEXT:    vmv1r.v v9, v8
2429; CHECK-NEXT:    vmv1r.v v10, v8
2430; CHECK-NEXT:    vmv1r.v v11, v8
2431; CHECK-NEXT:    vmv1r.v v12, v8
2432; CHECK-NEXT:    vmv1r.v v13, v8
2433; CHECK-NEXT:    vmv1r.v v14, v8
2434; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
2435; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
2436; CHECK-NEXT:    ret
2437entry:
2438  tail call void @llvm.riscv.vsseg7.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
2439  ret void
2440}
2441
2442declare void @llvm.riscv.vsseg8.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16* , i32)
2443declare void @llvm.riscv.vsseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
2444
2445define void @test_vsseg8_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl) {
2446; CHECK-LABEL: test_vsseg8_nxv2i16:
2447; CHECK:       # %bb.0: # %entry
2448; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
2449; CHECK-NEXT:    vmv1r.v v9, v8
2450; CHECK-NEXT:    vmv1r.v v10, v8
2451; CHECK-NEXT:    vmv1r.v v11, v8
2452; CHECK-NEXT:    vmv1r.v v12, v8
2453; CHECK-NEXT:    vmv1r.v v13, v8
2454; CHECK-NEXT:    vmv1r.v v14, v8
2455; CHECK-NEXT:    vmv1r.v v15, v8
2456; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
2457; CHECK-NEXT:    vsseg8e16.v v8, (a0)
2458; CHECK-NEXT:    ret
2459entry:
2460  tail call void @llvm.riscv.vsseg8.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %vl)
2461  ret void
2462}
2463
2464define void @test_vsseg8_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2465; CHECK-LABEL: test_vsseg8_mask_nxv2i16:
2466; CHECK:       # %bb.0: # %entry
2467; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
2468; CHECK-NEXT:    vmv1r.v v9, v8
2469; CHECK-NEXT:    vmv1r.v v10, v8
2470; CHECK-NEXT:    vmv1r.v v11, v8
2471; CHECK-NEXT:    vmv1r.v v12, v8
2472; CHECK-NEXT:    vmv1r.v v13, v8
2473; CHECK-NEXT:    vmv1r.v v14, v8
2474; CHECK-NEXT:    vmv1r.v v15, v8
2475; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
2476; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
2477; CHECK-NEXT:    ret
2478entry:
2479  tail call void @llvm.riscv.vsseg8.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
2480  ret void
2481}
2482
2483declare void @llvm.riscv.vsseg2.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32* , i32)
2484declare void @llvm.riscv.vsseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i32)
2485
2486define void @test_vsseg2_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %vl) {
2487; CHECK-LABEL: test_vsseg2_nxv4i32:
2488; CHECK:       # %bb.0: # %entry
2489; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
2490; CHECK-NEXT:    vmv2r.v v10, v8
2491; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
2492; CHECK-NEXT:    vsseg2e32.v v8, (a0)
2493; CHECK-NEXT:    ret
2494entry:
2495  tail call void @llvm.riscv.vsseg2.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i32 %vl)
2496  ret void
2497}
2498
2499define void @test_vsseg2_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl) {
2500; CHECK-LABEL: test_vsseg2_mask_nxv4i32:
2501; CHECK:       # %bb.0: # %entry
2502; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
2503; CHECK-NEXT:    vmv2r.v v10, v8
2504; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
2505; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
2506; CHECK-NEXT:    ret
2507entry:
2508  tail call void @llvm.riscv.vsseg2.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl)
2509  ret void
2510}
2511
2512declare void @llvm.riscv.vsseg3.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32* , i32)
2513declare void @llvm.riscv.vsseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i32)
2514
2515define void @test_vsseg3_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %vl) {
2516; CHECK-LABEL: test_vsseg3_nxv4i32:
2517; CHECK:       # %bb.0: # %entry
2518; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
2519; CHECK-NEXT:    vmv2r.v v10, v8
2520; CHECK-NEXT:    vmv2r.v v12, v8
2521; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
2522; CHECK-NEXT:    vsseg3e32.v v8, (a0)
2523; CHECK-NEXT:    ret
2524entry:
2525  tail call void @llvm.riscv.vsseg3.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i32 %vl)
2526  ret void
2527}
2528
2529define void @test_vsseg3_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl) {
2530; CHECK-LABEL: test_vsseg3_mask_nxv4i32:
2531; CHECK:       # %bb.0: # %entry
2532; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
2533; CHECK-NEXT:    vmv2r.v v10, v8
2534; CHECK-NEXT:    vmv2r.v v12, v8
2535; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
2536; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
2537; CHECK-NEXT:    ret
2538entry:
2539  tail call void @llvm.riscv.vsseg3.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl)
2540  ret void
2541}
2542
2543declare void @llvm.riscv.vsseg4.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32* , i32)
2544declare void @llvm.riscv.vsseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i32)
2545
2546define void @test_vsseg4_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %vl) {
2547; CHECK-LABEL: test_vsseg4_nxv4i32:
2548; CHECK:       # %bb.0: # %entry
2549; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
2550; CHECK-NEXT:    vmv2r.v v10, v8
2551; CHECK-NEXT:    vmv2r.v v12, v8
2552; CHECK-NEXT:    vmv2r.v v14, v8
2553; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
2554; CHECK-NEXT:    vsseg4e32.v v8, (a0)
2555; CHECK-NEXT:    ret
2556entry:
2557  tail call void @llvm.riscv.vsseg4.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i32 %vl)
2558  ret void
2559}
2560
2561define void @test_vsseg4_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl) {
2562; CHECK-LABEL: test_vsseg4_mask_nxv4i32:
2563; CHECK:       # %bb.0: # %entry
2564; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
2565; CHECK-NEXT:    vmv2r.v v10, v8
2566; CHECK-NEXT:    vmv2r.v v12, v8
2567; CHECK-NEXT:    vmv2r.v v14, v8
2568; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
2569; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
2570; CHECK-NEXT:    ret
2571entry:
2572  tail call void @llvm.riscv.vsseg4.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl)
2573  ret void
2574}
2575
2576declare void @llvm.riscv.vsseg2.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, half* , i32)
2577declare void @llvm.riscv.vsseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 16 x i1>, i32)
2578
2579define void @test_vsseg2_nxv16f16(<vscale x 16 x half> %val, half* %base, i32 %vl) {
2580; CHECK-LABEL: test_vsseg2_nxv16f16:
2581; CHECK:       # %bb.0: # %entry
2582; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
2583; CHECK-NEXT:    vmv4r.v v12, v8
2584; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
2585; CHECK-NEXT:    vsseg2e16.v v8, (a0)
2586; CHECK-NEXT:    ret
2587entry:
2588  tail call void @llvm.riscv.vsseg2.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, i32 %vl)
2589  ret void
2590}
2591
2592define void @test_vsseg2_mask_nxv16f16(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i1> %mask, i32 %vl) {
2593; CHECK-LABEL: test_vsseg2_mask_nxv16f16:
2594; CHECK:       # %bb.0: # %entry
2595; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
2596; CHECK-NEXT:    vmv4r.v v12, v8
2597; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
2598; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
2599; CHECK-NEXT:    ret
2600entry:
2601  tail call void @llvm.riscv.vsseg2.mask.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i1> %mask, i32 %vl)
2602  ret void
2603}
2604
2605declare void @llvm.riscv.vsseg2.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, double* , i32)
2606declare void @llvm.riscv.vsseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i1>, i32)
2607
2608define void @test_vsseg2_nxv4f64(<vscale x 4 x double> %val, double* %base, i32 %vl) {
2609; CHECK-LABEL: test_vsseg2_nxv4f64:
2610; CHECK:       # %bb.0: # %entry
2611; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
2612; CHECK-NEXT:    vmv4r.v v12, v8
2613; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
2614; CHECK-NEXT:    vsseg2e64.v v8, (a0)
2615; CHECK-NEXT:    ret
2616entry:
2617  tail call void @llvm.riscv.vsseg2.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, i32 %vl)
2618  ret void
2619}
2620
2621define void @test_vsseg2_mask_nxv4f64(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i1> %mask, i32 %vl) {
2622; CHECK-LABEL: test_vsseg2_mask_nxv4f64:
2623; CHECK:       # %bb.0: # %entry
2624; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
2625; CHECK-NEXT:    vmv4r.v v12, v8
2626; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
2627; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
2628; CHECK-NEXT:    ret
2629entry:
2630  tail call void @llvm.riscv.vsseg2.mask.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i1> %mask, i32 %vl)
2631  ret void
2632}
2633
2634declare void @llvm.riscv.vsseg2.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, double* , i32)
2635declare void @llvm.riscv.vsseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i32)
2636
2637define void @test_vsseg2_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %vl) {
2638; CHECK-LABEL: test_vsseg2_nxv1f64:
2639; CHECK:       # %bb.0: # %entry
2640; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
2641; CHECK-NEXT:    vmv1r.v v9, v8
2642; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
2643; CHECK-NEXT:    vsseg2e64.v v8, (a0)
2644; CHECK-NEXT:    ret
2645entry:
2646  tail call void @llvm.riscv.vsseg2.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %vl)
2647  ret void
2648}
2649
2650define void @test_vsseg2_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl) {
2651; CHECK-LABEL: test_vsseg2_mask_nxv1f64:
2652; CHECK:       # %bb.0: # %entry
2653; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
2654; CHECK-NEXT:    vmv1r.v v9, v8
2655; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
2656; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
2657; CHECK-NEXT:    ret
2658entry:
2659  tail call void @llvm.riscv.vsseg2.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl)
2660  ret void
2661}
2662
2663declare void @llvm.riscv.vsseg3.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double* , i32)
2664declare void @llvm.riscv.vsseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i32)
2665
2666define void @test_vsseg3_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %vl) {
2667; CHECK-LABEL: test_vsseg3_nxv1f64:
2668; CHECK:       # %bb.0: # %entry
2669; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
2670; CHECK-NEXT:    vmv1r.v v9, v8
2671; CHECK-NEXT:    vmv1r.v v10, v8
2672; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
2673; CHECK-NEXT:    vsseg3e64.v v8, (a0)
2674; CHECK-NEXT:    ret
2675entry:
2676  tail call void @llvm.riscv.vsseg3.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %vl)
2677  ret void
2678}
2679
2680define void @test_vsseg3_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl) {
2681; CHECK-LABEL: test_vsseg3_mask_nxv1f64:
2682; CHECK:       # %bb.0: # %entry
2683; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
2684; CHECK-NEXT:    vmv1r.v v9, v8
2685; CHECK-NEXT:    vmv1r.v v10, v8
2686; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
2687; CHECK-NEXT:    vsseg3e64.v v8, (a0), v0.t
2688; CHECK-NEXT:    ret
2689entry:
2690  tail call void @llvm.riscv.vsseg3.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl)
2691  ret void
2692}
2693
2694declare void @llvm.riscv.vsseg4.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double* , i32)
2695declare void @llvm.riscv.vsseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i32)
2696
2697define void @test_vsseg4_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %vl) {
2698; CHECK-LABEL: test_vsseg4_nxv1f64:
2699; CHECK:       # %bb.0: # %entry
2700; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
2701; CHECK-NEXT:    vmv1r.v v9, v8
2702; CHECK-NEXT:    vmv1r.v v10, v8
2703; CHECK-NEXT:    vmv1r.v v11, v8
2704; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
2705; CHECK-NEXT:    vsseg4e64.v v8, (a0)
2706; CHECK-NEXT:    ret
2707entry:
2708  tail call void @llvm.riscv.vsseg4.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %vl)
2709  ret void
2710}
2711
2712define void @test_vsseg4_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl) {
2713; CHECK-LABEL: test_vsseg4_mask_nxv1f64:
2714; CHECK:       # %bb.0: # %entry
2715; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
2716; CHECK-NEXT:    vmv1r.v v9, v8
2717; CHECK-NEXT:    vmv1r.v v10, v8
2718; CHECK-NEXT:    vmv1r.v v11, v8
2719; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
2720; CHECK-NEXT:    vsseg4e64.v v8, (a0), v0.t
2721; CHECK-NEXT:    ret
2722entry:
2723  tail call void @llvm.riscv.vsseg4.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl)
2724  ret void
2725}
2726
2727declare void @llvm.riscv.vsseg5.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double* , i32)
2728declare void @llvm.riscv.vsseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i32)
2729
2730define void @test_vsseg5_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %vl) {
2731; CHECK-LABEL: test_vsseg5_nxv1f64:
2732; CHECK:       # %bb.0: # %entry
2733; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
2734; CHECK-NEXT:    vmv1r.v v9, v8
2735; CHECK-NEXT:    vmv1r.v v10, v8
2736; CHECK-NEXT:    vmv1r.v v11, v8
2737; CHECK-NEXT:    vmv1r.v v12, v8
2738; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
2739; CHECK-NEXT:    vsseg5e64.v v8, (a0)
2740; CHECK-NEXT:    ret
2741entry:
2742  tail call void @llvm.riscv.vsseg5.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %vl)
2743  ret void
2744}
2745
2746define void @test_vsseg5_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl) {
2747; CHECK-LABEL: test_vsseg5_mask_nxv1f64:
2748; CHECK:       # %bb.0: # %entry
2749; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
2750; CHECK-NEXT:    vmv1r.v v9, v8
2751; CHECK-NEXT:    vmv1r.v v10, v8
2752; CHECK-NEXT:    vmv1r.v v11, v8
2753; CHECK-NEXT:    vmv1r.v v12, v8
2754; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
2755; CHECK-NEXT:    vsseg5e64.v v8, (a0), v0.t
2756; CHECK-NEXT:    ret
2757entry:
2758  tail call void @llvm.riscv.vsseg5.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl)
2759  ret void
2760}
2761
2762declare void @llvm.riscv.vsseg6.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double* , i32)
2763declare void @llvm.riscv.vsseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i32)
2764
2765define void @test_vsseg6_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %vl) {
2766; CHECK-LABEL: test_vsseg6_nxv1f64:
2767; CHECK:       # %bb.0: # %entry
2768; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
2769; CHECK-NEXT:    vmv1r.v v9, v8
2770; CHECK-NEXT:    vmv1r.v v10, v8
2771; CHECK-NEXT:    vmv1r.v v11, v8
2772; CHECK-NEXT:    vmv1r.v v12, v8
2773; CHECK-NEXT:    vmv1r.v v13, v8
2774; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
2775; CHECK-NEXT:    vsseg6e64.v v8, (a0)
2776; CHECK-NEXT:    ret
2777entry:
2778  tail call void @llvm.riscv.vsseg6.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %vl)
2779  ret void
2780}
2781
2782define void @test_vsseg6_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl) {
2783; CHECK-LABEL: test_vsseg6_mask_nxv1f64:
2784; CHECK:       # %bb.0: # %entry
2785; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
2786; CHECK-NEXT:    vmv1r.v v9, v8
2787; CHECK-NEXT:    vmv1r.v v10, v8
2788; CHECK-NEXT:    vmv1r.v v11, v8
2789; CHECK-NEXT:    vmv1r.v v12, v8
2790; CHECK-NEXT:    vmv1r.v v13, v8
2791; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
2792; CHECK-NEXT:    vsseg6e64.v v8, (a0), v0.t
2793; CHECK-NEXT:    ret
2794entry:
2795  tail call void @llvm.riscv.vsseg6.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl)
2796  ret void
2797}
2798
2799declare void @llvm.riscv.vsseg7.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double* , i32)
2800declare void @llvm.riscv.vsseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i32)
2801
2802define void @test_vsseg7_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %vl) {
2803; CHECK-LABEL: test_vsseg7_nxv1f64:
2804; CHECK:       # %bb.0: # %entry
2805; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
2806; CHECK-NEXT:    vmv1r.v v9, v8
2807; CHECK-NEXT:    vmv1r.v v10, v8
2808; CHECK-NEXT:    vmv1r.v v11, v8
2809; CHECK-NEXT:    vmv1r.v v12, v8
2810; CHECK-NEXT:    vmv1r.v v13, v8
2811; CHECK-NEXT:    vmv1r.v v14, v8
2812; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
2813; CHECK-NEXT:    vsseg7e64.v v8, (a0)
2814; CHECK-NEXT:    ret
2815entry:
2816  tail call void @llvm.riscv.vsseg7.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %vl)
2817  ret void
2818}
2819
2820define void @test_vsseg7_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl) {
2821; CHECK-LABEL: test_vsseg7_mask_nxv1f64:
2822; CHECK:       # %bb.0: # %entry
2823; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
2824; CHECK-NEXT:    vmv1r.v v9, v8
2825; CHECK-NEXT:    vmv1r.v v10, v8
2826; CHECK-NEXT:    vmv1r.v v11, v8
2827; CHECK-NEXT:    vmv1r.v v12, v8
2828; CHECK-NEXT:    vmv1r.v v13, v8
2829; CHECK-NEXT:    vmv1r.v v14, v8
2830; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
2831; CHECK-NEXT:    vsseg7e64.v v8, (a0), v0.t
2832; CHECK-NEXT:    ret
2833entry:
2834  tail call void @llvm.riscv.vsseg7.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl)
2835  ret void
2836}
2837
2838declare void @llvm.riscv.vsseg8.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double* , i32)
2839declare void @llvm.riscv.vsseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i32)
2840
2841define void @test_vsseg8_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %vl) {
2842; CHECK-LABEL: test_vsseg8_nxv1f64:
2843; CHECK:       # %bb.0: # %entry
2844; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
2845; CHECK-NEXT:    vmv1r.v v9, v8
2846; CHECK-NEXT:    vmv1r.v v10, v8
2847; CHECK-NEXT:    vmv1r.v v11, v8
2848; CHECK-NEXT:    vmv1r.v v12, v8
2849; CHECK-NEXT:    vmv1r.v v13, v8
2850; CHECK-NEXT:    vmv1r.v v14, v8
2851; CHECK-NEXT:    vmv1r.v v15, v8
2852; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
2853; CHECK-NEXT:    vsseg8e64.v v8, (a0)
2854; CHECK-NEXT:    ret
2855entry:
2856  tail call void @llvm.riscv.vsseg8.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %vl)
2857  ret void
2858}
2859
2860define void @test_vsseg8_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl) {
2861; CHECK-LABEL: test_vsseg8_mask_nxv1f64:
2862; CHECK:       # %bb.0: # %entry
2863; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
2864; CHECK-NEXT:    vmv1r.v v9, v8
2865; CHECK-NEXT:    vmv1r.v v10, v8
2866; CHECK-NEXT:    vmv1r.v v11, v8
2867; CHECK-NEXT:    vmv1r.v v12, v8
2868; CHECK-NEXT:    vmv1r.v v13, v8
2869; CHECK-NEXT:    vmv1r.v v14, v8
2870; CHECK-NEXT:    vmv1r.v v15, v8
2871; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
2872; CHECK-NEXT:    vsseg8e64.v v8, (a0), v0.t
2873; CHECK-NEXT:    ret
2874entry:
2875  tail call void @llvm.riscv.vsseg8.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl)
2876  ret void
2877}
2878
2879declare void @llvm.riscv.vsseg2.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, float* , i32)
2880declare void @llvm.riscv.vsseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i32)
2881
2882define void @test_vsseg2_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %vl) {
2883; CHECK-LABEL: test_vsseg2_nxv2f32:
2884; CHECK:       # %bb.0: # %entry
2885; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
2886; CHECK-NEXT:    vmv1r.v v9, v8
2887; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
2888; CHECK-NEXT:    vsseg2e32.v v8, (a0)
2889; CHECK-NEXT:    ret
2890entry:
2891  tail call void @llvm.riscv.vsseg2.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %vl)
2892  ret void
2893}
2894
2895define void @test_vsseg2_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2896; CHECK-LABEL: test_vsseg2_mask_nxv2f32:
2897; CHECK:       # %bb.0: # %entry
2898; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
2899; CHECK-NEXT:    vmv1r.v v9, v8
2900; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
2901; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
2902; CHECK-NEXT:    ret
2903entry:
2904  tail call void @llvm.riscv.vsseg2.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl)
2905  ret void
2906}
2907
2908declare void @llvm.riscv.vsseg3.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float* , i32)
2909declare void @llvm.riscv.vsseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i32)
2910
2911define void @test_vsseg3_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %vl) {
2912; CHECK-LABEL: test_vsseg3_nxv2f32:
2913; CHECK:       # %bb.0: # %entry
2914; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
2915; CHECK-NEXT:    vmv1r.v v9, v8
2916; CHECK-NEXT:    vmv1r.v v10, v8
2917; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
2918; CHECK-NEXT:    vsseg3e32.v v8, (a0)
2919; CHECK-NEXT:    ret
2920entry:
2921  tail call void @llvm.riscv.vsseg3.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %vl)
2922  ret void
2923}
2924
2925define void @test_vsseg3_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2926; CHECK-LABEL: test_vsseg3_mask_nxv2f32:
2927; CHECK:       # %bb.0: # %entry
2928; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
2929; CHECK-NEXT:    vmv1r.v v9, v8
2930; CHECK-NEXT:    vmv1r.v v10, v8
2931; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
2932; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
2933; CHECK-NEXT:    ret
2934entry:
2935  tail call void @llvm.riscv.vsseg3.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl)
2936  ret void
2937}
2938
2939declare void @llvm.riscv.vsseg4.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float* , i32)
2940declare void @llvm.riscv.vsseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i32)
2941
2942define void @test_vsseg4_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %vl) {
2943; CHECK-LABEL: test_vsseg4_nxv2f32:
2944; CHECK:       # %bb.0: # %entry
2945; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
2946; CHECK-NEXT:    vmv1r.v v9, v8
2947; CHECK-NEXT:    vmv1r.v v10, v8
2948; CHECK-NEXT:    vmv1r.v v11, v8
2949; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
2950; CHECK-NEXT:    vsseg4e32.v v8, (a0)
2951; CHECK-NEXT:    ret
2952entry:
2953  tail call void @llvm.riscv.vsseg4.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %vl)
2954  ret void
2955}
2956
2957define void @test_vsseg4_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2958; CHECK-LABEL: test_vsseg4_mask_nxv2f32:
2959; CHECK:       # %bb.0: # %entry
2960; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
2961; CHECK-NEXT:    vmv1r.v v9, v8
2962; CHECK-NEXT:    vmv1r.v v10, v8
2963; CHECK-NEXT:    vmv1r.v v11, v8
2964; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
2965; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
2966; CHECK-NEXT:    ret
2967entry:
2968  tail call void @llvm.riscv.vsseg4.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl)
2969  ret void
2970}
2971
2972declare void @llvm.riscv.vsseg5.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float* , i32)
2973declare void @llvm.riscv.vsseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i32)
2974
2975define void @test_vsseg5_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %vl) {
2976; CHECK-LABEL: test_vsseg5_nxv2f32:
2977; CHECK:       # %bb.0: # %entry
2978; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
2979; CHECK-NEXT:    vmv1r.v v9, v8
2980; CHECK-NEXT:    vmv1r.v v10, v8
2981; CHECK-NEXT:    vmv1r.v v11, v8
2982; CHECK-NEXT:    vmv1r.v v12, v8
2983; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
2984; CHECK-NEXT:    vsseg5e32.v v8, (a0)
2985; CHECK-NEXT:    ret
2986entry:
2987  tail call void @llvm.riscv.vsseg5.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %vl)
2988  ret void
2989}
2990
2991define void @test_vsseg5_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl) {
2992; CHECK-LABEL: test_vsseg5_mask_nxv2f32:
2993; CHECK:       # %bb.0: # %entry
2994; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
2995; CHECK-NEXT:    vmv1r.v v9, v8
2996; CHECK-NEXT:    vmv1r.v v10, v8
2997; CHECK-NEXT:    vmv1r.v v11, v8
2998; CHECK-NEXT:    vmv1r.v v12, v8
2999; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
3000; CHECK-NEXT:    vsseg5e32.v v8, (a0), v0.t
3001; CHECK-NEXT:    ret
3002entry:
3003  tail call void @llvm.riscv.vsseg5.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl)
3004  ret void
3005}
3006
3007declare void @llvm.riscv.vsseg6.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float* , i32)
3008declare void @llvm.riscv.vsseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i32)
3009
3010define void @test_vsseg6_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %vl) {
3011; CHECK-LABEL: test_vsseg6_nxv2f32:
3012; CHECK:       # %bb.0: # %entry
3013; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
3014; CHECK-NEXT:    vmv1r.v v9, v8
3015; CHECK-NEXT:    vmv1r.v v10, v8
3016; CHECK-NEXT:    vmv1r.v v11, v8
3017; CHECK-NEXT:    vmv1r.v v12, v8
3018; CHECK-NEXT:    vmv1r.v v13, v8
3019; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
3020; CHECK-NEXT:    vsseg6e32.v v8, (a0)
3021; CHECK-NEXT:    ret
3022entry:
3023  tail call void @llvm.riscv.vsseg6.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %vl)
3024  ret void
3025}
3026
3027define void @test_vsseg6_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl) {
3028; CHECK-LABEL: test_vsseg6_mask_nxv2f32:
3029; CHECK:       # %bb.0: # %entry
3030; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
3031; CHECK-NEXT:    vmv1r.v v9, v8
3032; CHECK-NEXT:    vmv1r.v v10, v8
3033; CHECK-NEXT:    vmv1r.v v11, v8
3034; CHECK-NEXT:    vmv1r.v v12, v8
3035; CHECK-NEXT:    vmv1r.v v13, v8
3036; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
3037; CHECK-NEXT:    vsseg6e32.v v8, (a0), v0.t
3038; CHECK-NEXT:    ret
3039entry:
3040  tail call void @llvm.riscv.vsseg6.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl)
3041  ret void
3042}
3043
3044declare void @llvm.riscv.vsseg7.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float* , i32)
3045declare void @llvm.riscv.vsseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i32)
3046
3047define void @test_vsseg7_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %vl) {
3048; CHECK-LABEL: test_vsseg7_nxv2f32:
3049; CHECK:       # %bb.0: # %entry
3050; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
3051; CHECK-NEXT:    vmv1r.v v9, v8
3052; CHECK-NEXT:    vmv1r.v v10, v8
3053; CHECK-NEXT:    vmv1r.v v11, v8
3054; CHECK-NEXT:    vmv1r.v v12, v8
3055; CHECK-NEXT:    vmv1r.v v13, v8
3056; CHECK-NEXT:    vmv1r.v v14, v8
3057; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
3058; CHECK-NEXT:    vsseg7e32.v v8, (a0)
3059; CHECK-NEXT:    ret
3060entry:
3061  tail call void @llvm.riscv.vsseg7.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %vl)
3062  ret void
3063}
3064
3065define void @test_vsseg7_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl) {
3066; CHECK-LABEL: test_vsseg7_mask_nxv2f32:
3067; CHECK:       # %bb.0: # %entry
3068; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
3069; CHECK-NEXT:    vmv1r.v v9, v8
3070; CHECK-NEXT:    vmv1r.v v10, v8
3071; CHECK-NEXT:    vmv1r.v v11, v8
3072; CHECK-NEXT:    vmv1r.v v12, v8
3073; CHECK-NEXT:    vmv1r.v v13, v8
3074; CHECK-NEXT:    vmv1r.v v14, v8
3075; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
3076; CHECK-NEXT:    vsseg7e32.v v8, (a0), v0.t
3077; CHECK-NEXT:    ret
3078entry:
3079  tail call void @llvm.riscv.vsseg7.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl)
3080  ret void
3081}
3082
3083declare void @llvm.riscv.vsseg8.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float* , i32)
3084declare void @llvm.riscv.vsseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i32)
3085
3086define void @test_vsseg8_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %vl) {
3087; CHECK-LABEL: test_vsseg8_nxv2f32:
3088; CHECK:       # %bb.0: # %entry
3089; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
3090; CHECK-NEXT:    vmv1r.v v9, v8
3091; CHECK-NEXT:    vmv1r.v v10, v8
3092; CHECK-NEXT:    vmv1r.v v11, v8
3093; CHECK-NEXT:    vmv1r.v v12, v8
3094; CHECK-NEXT:    vmv1r.v v13, v8
3095; CHECK-NEXT:    vmv1r.v v14, v8
3096; CHECK-NEXT:    vmv1r.v v15, v8
3097; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
3098; CHECK-NEXT:    vsseg8e32.v v8, (a0)
3099; CHECK-NEXT:    ret
3100entry:
3101  tail call void @llvm.riscv.vsseg8.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %vl)
3102  ret void
3103}
3104
3105define void @test_vsseg8_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl) {
3106; CHECK-LABEL: test_vsseg8_mask_nxv2f32:
3107; CHECK:       # %bb.0: # %entry
3108; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
3109; CHECK-NEXT:    vmv1r.v v9, v8
3110; CHECK-NEXT:    vmv1r.v v10, v8
3111; CHECK-NEXT:    vmv1r.v v11, v8
3112; CHECK-NEXT:    vmv1r.v v12, v8
3113; CHECK-NEXT:    vmv1r.v v13, v8
3114; CHECK-NEXT:    vmv1r.v v14, v8
3115; CHECK-NEXT:    vmv1r.v v15, v8
3116; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
3117; CHECK-NEXT:    vsseg8e32.v v8, (a0), v0.t
3118; CHECK-NEXT:    ret
3119entry:
3120  tail call void @llvm.riscv.vsseg8.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl)
3121  ret void
3122}
3123
3124declare void @llvm.riscv.vsseg2.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, half* , i32)
3125declare void @llvm.riscv.vsseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i32)
3126
3127define void @test_vsseg2_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %vl) {
3128; CHECK-LABEL: test_vsseg2_nxv1f16:
3129; CHECK:       # %bb.0: # %entry
3130; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
3131; CHECK-NEXT:    vmv1r.v v9, v8
3132; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
3133; CHECK-NEXT:    vsseg2e16.v v8, (a0)
3134; CHECK-NEXT:    ret
3135entry:
3136  tail call void @llvm.riscv.vsseg2.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %vl)
3137  ret void
3138}
3139
3140define void @test_vsseg2_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl) {
3141; CHECK-LABEL: test_vsseg2_mask_nxv1f16:
3142; CHECK:       # %bb.0: # %entry
3143; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
3144; CHECK-NEXT:    vmv1r.v v9, v8
3145; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
3146; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
3147; CHECK-NEXT:    ret
3148entry:
3149  tail call void @llvm.riscv.vsseg2.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl)
3150  ret void
3151}
3152
3153declare void @llvm.riscv.vsseg3.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half* , i32)
3154declare void @llvm.riscv.vsseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i32)
3155
3156define void @test_vsseg3_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %vl) {
3157; CHECK-LABEL: test_vsseg3_nxv1f16:
3158; CHECK:       # %bb.0: # %entry
3159; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
3160; CHECK-NEXT:    vmv1r.v v9, v8
3161; CHECK-NEXT:    vmv1r.v v10, v8
3162; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
3163; CHECK-NEXT:    vsseg3e16.v v8, (a0)
3164; CHECK-NEXT:    ret
3165entry:
3166  tail call void @llvm.riscv.vsseg3.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %vl)
3167  ret void
3168}
3169
3170define void @test_vsseg3_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl) {
3171; CHECK-LABEL: test_vsseg3_mask_nxv1f16:
3172; CHECK:       # %bb.0: # %entry
3173; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
3174; CHECK-NEXT:    vmv1r.v v9, v8
3175; CHECK-NEXT:    vmv1r.v v10, v8
3176; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
3177; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
3178; CHECK-NEXT:    ret
3179entry:
3180  tail call void @llvm.riscv.vsseg3.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl)
3181  ret void
3182}
3183
3184declare void @llvm.riscv.vsseg4.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half* , i32)
3185declare void @llvm.riscv.vsseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i32)
3186
3187define void @test_vsseg4_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %vl) {
3188; CHECK-LABEL: test_vsseg4_nxv1f16:
3189; CHECK:       # %bb.0: # %entry
3190; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
3191; CHECK-NEXT:    vmv1r.v v9, v8
3192; CHECK-NEXT:    vmv1r.v v10, v8
3193; CHECK-NEXT:    vmv1r.v v11, v8
3194; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
3195; CHECK-NEXT:    vsseg4e16.v v8, (a0)
3196; CHECK-NEXT:    ret
3197entry:
3198  tail call void @llvm.riscv.vsseg4.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %vl)
3199  ret void
3200}
3201
3202define void @test_vsseg4_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl) {
3203; CHECK-LABEL: test_vsseg4_mask_nxv1f16:
3204; CHECK:       # %bb.0: # %entry
3205; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
3206; CHECK-NEXT:    vmv1r.v v9, v8
3207; CHECK-NEXT:    vmv1r.v v10, v8
3208; CHECK-NEXT:    vmv1r.v v11, v8
3209; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
3210; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
3211; CHECK-NEXT:    ret
3212entry:
3213  tail call void @llvm.riscv.vsseg4.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl)
3214  ret void
3215}
3216
3217declare void @llvm.riscv.vsseg5.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half* , i32)
3218declare void @llvm.riscv.vsseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i32)
3219
3220define void @test_vsseg5_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %vl) {
3221; CHECK-LABEL: test_vsseg5_nxv1f16:
3222; CHECK:       # %bb.0: # %entry
3223; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
3224; CHECK-NEXT:    vmv1r.v v9, v8
3225; CHECK-NEXT:    vmv1r.v v10, v8
3226; CHECK-NEXT:    vmv1r.v v11, v8
3227; CHECK-NEXT:    vmv1r.v v12, v8
3228; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
3229; CHECK-NEXT:    vsseg5e16.v v8, (a0)
3230; CHECK-NEXT:    ret
3231entry:
3232  tail call void @llvm.riscv.vsseg5.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %vl)
3233  ret void
3234}
3235
3236define void @test_vsseg5_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl) {
3237; CHECK-LABEL: test_vsseg5_mask_nxv1f16:
3238; CHECK:       # %bb.0: # %entry
3239; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
3240; CHECK-NEXT:    vmv1r.v v9, v8
3241; CHECK-NEXT:    vmv1r.v v10, v8
3242; CHECK-NEXT:    vmv1r.v v11, v8
3243; CHECK-NEXT:    vmv1r.v v12, v8
3244; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
3245; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
3246; CHECK-NEXT:    ret
3247entry:
3248  tail call void @llvm.riscv.vsseg5.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl)
3249  ret void
3250}
3251
3252declare void @llvm.riscv.vsseg6.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half* , i32)
3253declare void @llvm.riscv.vsseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i32)
3254
3255define void @test_vsseg6_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %vl) {
3256; CHECK-LABEL: test_vsseg6_nxv1f16:
3257; CHECK:       # %bb.0: # %entry
3258; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
3259; CHECK-NEXT:    vmv1r.v v9, v8
3260; CHECK-NEXT:    vmv1r.v v10, v8
3261; CHECK-NEXT:    vmv1r.v v11, v8
3262; CHECK-NEXT:    vmv1r.v v12, v8
3263; CHECK-NEXT:    vmv1r.v v13, v8
3264; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
3265; CHECK-NEXT:    vsseg6e16.v v8, (a0)
3266; CHECK-NEXT:    ret
3267entry:
3268  tail call void @llvm.riscv.vsseg6.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %vl)
3269  ret void
3270}
3271
3272define void @test_vsseg6_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl) {
3273; CHECK-LABEL: test_vsseg6_mask_nxv1f16:
3274; CHECK:       # %bb.0: # %entry
3275; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
3276; CHECK-NEXT:    vmv1r.v v9, v8
3277; CHECK-NEXT:    vmv1r.v v10, v8
3278; CHECK-NEXT:    vmv1r.v v11, v8
3279; CHECK-NEXT:    vmv1r.v v12, v8
3280; CHECK-NEXT:    vmv1r.v v13, v8
3281; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
3282; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
3283; CHECK-NEXT:    ret
3284entry:
3285  tail call void @llvm.riscv.vsseg6.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl)
3286  ret void
3287}
3288
3289declare void @llvm.riscv.vsseg7.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half* , i32)
3290declare void @llvm.riscv.vsseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i32)
3291
3292define void @test_vsseg7_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %vl) {
3293; CHECK-LABEL: test_vsseg7_nxv1f16:
3294; CHECK:       # %bb.0: # %entry
3295; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
3296; CHECK-NEXT:    vmv1r.v v9, v8
3297; CHECK-NEXT:    vmv1r.v v10, v8
3298; CHECK-NEXT:    vmv1r.v v11, v8
3299; CHECK-NEXT:    vmv1r.v v12, v8
3300; CHECK-NEXT:    vmv1r.v v13, v8
3301; CHECK-NEXT:    vmv1r.v v14, v8
3302; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
3303; CHECK-NEXT:    vsseg7e16.v v8, (a0)
3304; CHECK-NEXT:    ret
3305entry:
3306  tail call void @llvm.riscv.vsseg7.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %vl)
3307  ret void
3308}
3309
3310define void @test_vsseg7_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl) {
3311; CHECK-LABEL: test_vsseg7_mask_nxv1f16:
3312; CHECK:       # %bb.0: # %entry
3313; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
3314; CHECK-NEXT:    vmv1r.v v9, v8
3315; CHECK-NEXT:    vmv1r.v v10, v8
3316; CHECK-NEXT:    vmv1r.v v11, v8
3317; CHECK-NEXT:    vmv1r.v v12, v8
3318; CHECK-NEXT:    vmv1r.v v13, v8
3319; CHECK-NEXT:    vmv1r.v v14, v8
3320; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
3321; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
3322; CHECK-NEXT:    ret
3323entry:
3324  tail call void @llvm.riscv.vsseg7.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl)
3325  ret void
3326}
3327
3328declare void @llvm.riscv.vsseg8.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half* , i32)
3329declare void @llvm.riscv.vsseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i32)
3330
3331define void @test_vsseg8_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %vl) {
3332; CHECK-LABEL: test_vsseg8_nxv1f16:
3333; CHECK:       # %bb.0: # %entry
3334; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
3335; CHECK-NEXT:    vmv1r.v v9, v8
3336; CHECK-NEXT:    vmv1r.v v10, v8
3337; CHECK-NEXT:    vmv1r.v v11, v8
3338; CHECK-NEXT:    vmv1r.v v12, v8
3339; CHECK-NEXT:    vmv1r.v v13, v8
3340; CHECK-NEXT:    vmv1r.v v14, v8
3341; CHECK-NEXT:    vmv1r.v v15, v8
3342; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
3343; CHECK-NEXT:    vsseg8e16.v v8, (a0)
3344; CHECK-NEXT:    ret
3345entry:
3346  tail call void @llvm.riscv.vsseg8.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %vl)
3347  ret void
3348}
3349
3350define void @test_vsseg8_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl) {
3351; CHECK-LABEL: test_vsseg8_mask_nxv1f16:
3352; CHECK:       # %bb.0: # %entry
3353; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
3354; CHECK-NEXT:    vmv1r.v v9, v8
3355; CHECK-NEXT:    vmv1r.v v10, v8
3356; CHECK-NEXT:    vmv1r.v v11, v8
3357; CHECK-NEXT:    vmv1r.v v12, v8
3358; CHECK-NEXT:    vmv1r.v v13, v8
3359; CHECK-NEXT:    vmv1r.v v14, v8
3360; CHECK-NEXT:    vmv1r.v v15, v8
3361; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
3362; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
3363; CHECK-NEXT:    ret
3364entry:
3365  tail call void @llvm.riscv.vsseg8.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl)
3366  ret void
3367}
3368
3369declare void @llvm.riscv.vsseg2.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, float* , i32)
3370declare void @llvm.riscv.vsseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i32)
3371
3372define void @test_vsseg2_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %vl) {
3373; CHECK-LABEL: test_vsseg2_nxv1f32:
3374; CHECK:       # %bb.0: # %entry
3375; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
3376; CHECK-NEXT:    vmv1r.v v9, v8
3377; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
3378; CHECK-NEXT:    vsseg2e32.v v8, (a0)
3379; CHECK-NEXT:    ret
3380entry:
3381  tail call void @llvm.riscv.vsseg2.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %vl)
3382  ret void
3383}
3384
3385define void @test_vsseg2_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl) {
3386; CHECK-LABEL: test_vsseg2_mask_nxv1f32:
3387; CHECK:       # %bb.0: # %entry
3388; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
3389; CHECK-NEXT:    vmv1r.v v9, v8
3390; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
3391; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
3392; CHECK-NEXT:    ret
3393entry:
3394  tail call void @llvm.riscv.vsseg2.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl)
3395  ret void
3396}
3397
3398declare void @llvm.riscv.vsseg3.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float* , i32)
3399declare void @llvm.riscv.vsseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i32)
3400
3401define void @test_vsseg3_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %vl) {
3402; CHECK-LABEL: test_vsseg3_nxv1f32:
3403; CHECK:       # %bb.0: # %entry
3404; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
3405; CHECK-NEXT:    vmv1r.v v9, v8
3406; CHECK-NEXT:    vmv1r.v v10, v8
3407; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
3408; CHECK-NEXT:    vsseg3e32.v v8, (a0)
3409; CHECK-NEXT:    ret
3410entry:
3411  tail call void @llvm.riscv.vsseg3.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %vl)
3412  ret void
3413}
3414
3415define void @test_vsseg3_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl) {
3416; CHECK-LABEL: test_vsseg3_mask_nxv1f32:
3417; CHECK:       # %bb.0: # %entry
3418; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
3419; CHECK-NEXT:    vmv1r.v v9, v8
3420; CHECK-NEXT:    vmv1r.v v10, v8
3421; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
3422; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
3423; CHECK-NEXT:    ret
3424entry:
3425  tail call void @llvm.riscv.vsseg3.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl)
3426  ret void
3427}
3428
3429declare void @llvm.riscv.vsseg4.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float* , i32)
3430declare void @llvm.riscv.vsseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i32)
3431
3432define void @test_vsseg4_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %vl) {
3433; CHECK-LABEL: test_vsseg4_nxv1f32:
3434; CHECK:       # %bb.0: # %entry
3435; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
3436; CHECK-NEXT:    vmv1r.v v9, v8
3437; CHECK-NEXT:    vmv1r.v v10, v8
3438; CHECK-NEXT:    vmv1r.v v11, v8
3439; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
3440; CHECK-NEXT:    vsseg4e32.v v8, (a0)
3441; CHECK-NEXT:    ret
3442entry:
3443  tail call void @llvm.riscv.vsseg4.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %vl)
3444  ret void
3445}
3446
3447define void @test_vsseg4_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl) {
3448; CHECK-LABEL: test_vsseg4_mask_nxv1f32:
3449; CHECK:       # %bb.0: # %entry
3450; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
3451; CHECK-NEXT:    vmv1r.v v9, v8
3452; CHECK-NEXT:    vmv1r.v v10, v8
3453; CHECK-NEXT:    vmv1r.v v11, v8
3454; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
3455; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
3456; CHECK-NEXT:    ret
3457entry:
3458  tail call void @llvm.riscv.vsseg4.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl)
3459  ret void
3460}
3461
3462declare void @llvm.riscv.vsseg5.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float* , i32)
3463declare void @llvm.riscv.vsseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i32)
3464
3465define void @test_vsseg5_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %vl) {
3466; CHECK-LABEL: test_vsseg5_nxv1f32:
3467; CHECK:       # %bb.0: # %entry
3468; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
3469; CHECK-NEXT:    vmv1r.v v9, v8
3470; CHECK-NEXT:    vmv1r.v v10, v8
3471; CHECK-NEXT:    vmv1r.v v11, v8
3472; CHECK-NEXT:    vmv1r.v v12, v8
3473; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
3474; CHECK-NEXT:    vsseg5e32.v v8, (a0)
3475; CHECK-NEXT:    ret
3476entry:
3477  tail call void @llvm.riscv.vsseg5.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %vl)
3478  ret void
3479}
3480
3481define void @test_vsseg5_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl) {
3482; CHECK-LABEL: test_vsseg5_mask_nxv1f32:
3483; CHECK:       # %bb.0: # %entry
3484; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
3485; CHECK-NEXT:    vmv1r.v v9, v8
3486; CHECK-NEXT:    vmv1r.v v10, v8
3487; CHECK-NEXT:    vmv1r.v v11, v8
3488; CHECK-NEXT:    vmv1r.v v12, v8
3489; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
3490; CHECK-NEXT:    vsseg5e32.v v8, (a0), v0.t
3491; CHECK-NEXT:    ret
3492entry:
3493  tail call void @llvm.riscv.vsseg5.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl)
3494  ret void
3495}
3496
3497declare void @llvm.riscv.vsseg6.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float* , i32)
3498declare void @llvm.riscv.vsseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i32)
3499
3500define void @test_vsseg6_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %vl) {
3501; CHECK-LABEL: test_vsseg6_nxv1f32:
3502; CHECK:       # %bb.0: # %entry
3503; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
3504; CHECK-NEXT:    vmv1r.v v9, v8
3505; CHECK-NEXT:    vmv1r.v v10, v8
3506; CHECK-NEXT:    vmv1r.v v11, v8
3507; CHECK-NEXT:    vmv1r.v v12, v8
3508; CHECK-NEXT:    vmv1r.v v13, v8
3509; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
3510; CHECK-NEXT:    vsseg6e32.v v8, (a0)
3511; CHECK-NEXT:    ret
3512entry:
3513  tail call void @llvm.riscv.vsseg6.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %vl)
3514  ret void
3515}
3516
3517define void @test_vsseg6_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl) {
3518; CHECK-LABEL: test_vsseg6_mask_nxv1f32:
3519; CHECK:       # %bb.0: # %entry
3520; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
3521; CHECK-NEXT:    vmv1r.v v9, v8
3522; CHECK-NEXT:    vmv1r.v v10, v8
3523; CHECK-NEXT:    vmv1r.v v11, v8
3524; CHECK-NEXT:    vmv1r.v v12, v8
3525; CHECK-NEXT:    vmv1r.v v13, v8
3526; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
3527; CHECK-NEXT:    vsseg6e32.v v8, (a0), v0.t
3528; CHECK-NEXT:    ret
3529entry:
3530  tail call void @llvm.riscv.vsseg6.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl)
3531  ret void
3532}
3533
3534declare void @llvm.riscv.vsseg7.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float* , i32)
3535declare void @llvm.riscv.vsseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i32)
3536
3537define void @test_vsseg7_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %vl) {
3538; CHECK-LABEL: test_vsseg7_nxv1f32:
3539; CHECK:       # %bb.0: # %entry
3540; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
3541; CHECK-NEXT:    vmv1r.v v9, v8
3542; CHECK-NEXT:    vmv1r.v v10, v8
3543; CHECK-NEXT:    vmv1r.v v11, v8
3544; CHECK-NEXT:    vmv1r.v v12, v8
3545; CHECK-NEXT:    vmv1r.v v13, v8
3546; CHECK-NEXT:    vmv1r.v v14, v8
3547; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
3548; CHECK-NEXT:    vsseg7e32.v v8, (a0)
3549; CHECK-NEXT:    ret
3550entry:
3551  tail call void @llvm.riscv.vsseg7.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %vl)
3552  ret void
3553}
3554
3555define void @test_vsseg7_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl) {
3556; CHECK-LABEL: test_vsseg7_mask_nxv1f32:
3557; CHECK:       # %bb.0: # %entry
3558; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
3559; CHECK-NEXT:    vmv1r.v v9, v8
3560; CHECK-NEXT:    vmv1r.v v10, v8
3561; CHECK-NEXT:    vmv1r.v v11, v8
3562; CHECK-NEXT:    vmv1r.v v12, v8
3563; CHECK-NEXT:    vmv1r.v v13, v8
3564; CHECK-NEXT:    vmv1r.v v14, v8
3565; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
3566; CHECK-NEXT:    vsseg7e32.v v8, (a0), v0.t
3567; CHECK-NEXT:    ret
3568entry:
3569  tail call void @llvm.riscv.vsseg7.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl)
3570  ret void
3571}
3572
3573declare void @llvm.riscv.vsseg8.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float* , i32)
3574declare void @llvm.riscv.vsseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i32)
3575
3576define void @test_vsseg8_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %vl) {
3577; CHECK-LABEL: test_vsseg8_nxv1f32:
3578; CHECK:       # %bb.0: # %entry
3579; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
3580; CHECK-NEXT:    vmv1r.v v9, v8
3581; CHECK-NEXT:    vmv1r.v v10, v8
3582; CHECK-NEXT:    vmv1r.v v11, v8
3583; CHECK-NEXT:    vmv1r.v v12, v8
3584; CHECK-NEXT:    vmv1r.v v13, v8
3585; CHECK-NEXT:    vmv1r.v v14, v8
3586; CHECK-NEXT:    vmv1r.v v15, v8
3587; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
3588; CHECK-NEXT:    vsseg8e32.v v8, (a0)
3589; CHECK-NEXT:    ret
3590entry:
3591  tail call void @llvm.riscv.vsseg8.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %vl)
3592  ret void
3593}
3594
3595define void @test_vsseg8_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl) {
3596; CHECK-LABEL: test_vsseg8_mask_nxv1f32:
3597; CHECK:       # %bb.0: # %entry
3598; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
3599; CHECK-NEXT:    vmv1r.v v9, v8
3600; CHECK-NEXT:    vmv1r.v v10, v8
3601; CHECK-NEXT:    vmv1r.v v11, v8
3602; CHECK-NEXT:    vmv1r.v v12, v8
3603; CHECK-NEXT:    vmv1r.v v13, v8
3604; CHECK-NEXT:    vmv1r.v v14, v8
3605; CHECK-NEXT:    vmv1r.v v15, v8
3606; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
3607; CHECK-NEXT:    vsseg8e32.v v8, (a0), v0.t
3608; CHECK-NEXT:    ret
3609entry:
3610  tail call void @llvm.riscv.vsseg8.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl)
3611  ret void
3612}
3613
3614declare void @llvm.riscv.vsseg2.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, half* , i32)
3615declare void @llvm.riscv.vsseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i1>, i32)
3616
3617define void @test_vsseg2_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %vl) {
3618; CHECK-LABEL: test_vsseg2_nxv8f16:
3619; CHECK:       # %bb.0: # %entry
3620; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
3621; CHECK-NEXT:    vmv2r.v v10, v8
3622; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
3623; CHECK-NEXT:    vsseg2e16.v v8, (a0)
3624; CHECK-NEXT:    ret
3625entry:
3626  tail call void @llvm.riscv.vsseg2.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i32 %vl)
3627  ret void
3628}
3629
3630define void @test_vsseg2_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i32 %vl) {
3631; CHECK-LABEL: test_vsseg2_mask_nxv8f16:
3632; CHECK:       # %bb.0: # %entry
3633; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
3634; CHECK-NEXT:    vmv2r.v v10, v8
3635; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
3636; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
3637; CHECK-NEXT:    ret
3638entry:
3639  tail call void @llvm.riscv.vsseg2.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i32 %vl)
3640  ret void
3641}
3642
3643declare void @llvm.riscv.vsseg3.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half* , i32)
3644declare void @llvm.riscv.vsseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i1>, i32)
3645
3646define void @test_vsseg3_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %vl) {
3647; CHECK-LABEL: test_vsseg3_nxv8f16:
3648; CHECK:       # %bb.0: # %entry
3649; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
3650; CHECK-NEXT:    vmv2r.v v10, v8
3651; CHECK-NEXT:    vmv2r.v v12, v8
3652; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
3653; CHECK-NEXT:    vsseg3e16.v v8, (a0)
3654; CHECK-NEXT:    ret
3655entry:
3656  tail call void @llvm.riscv.vsseg3.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i32 %vl)
3657  ret void
3658}
3659
3660define void @test_vsseg3_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i32 %vl) {
3661; CHECK-LABEL: test_vsseg3_mask_nxv8f16:
3662; CHECK:       # %bb.0: # %entry
3663; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
3664; CHECK-NEXT:    vmv2r.v v10, v8
3665; CHECK-NEXT:    vmv2r.v v12, v8
3666; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
3667; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
3668; CHECK-NEXT:    ret
3669entry:
3670  tail call void @llvm.riscv.vsseg3.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i32 %vl)
3671  ret void
3672}
3673
3674declare void @llvm.riscv.vsseg4.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half* , i32)
3675declare void @llvm.riscv.vsseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i1>, i32)
3676
3677define void @test_vsseg4_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %vl) {
3678; CHECK-LABEL: test_vsseg4_nxv8f16:
3679; CHECK:       # %bb.0: # %entry
3680; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
3681; CHECK-NEXT:    vmv2r.v v10, v8
3682; CHECK-NEXT:    vmv2r.v v12, v8
3683; CHECK-NEXT:    vmv2r.v v14, v8
3684; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
3685; CHECK-NEXT:    vsseg4e16.v v8, (a0)
3686; CHECK-NEXT:    ret
3687entry:
3688  tail call void @llvm.riscv.vsseg4.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i32 %vl)
3689  ret void
3690}
3691
3692define void @test_vsseg4_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i32 %vl) {
3693; CHECK-LABEL: test_vsseg4_mask_nxv8f16:
3694; CHECK:       # %bb.0: # %entry
3695; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
3696; CHECK-NEXT:    vmv2r.v v10, v8
3697; CHECK-NEXT:    vmv2r.v v12, v8
3698; CHECK-NEXT:    vmv2r.v v14, v8
3699; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
3700; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
3701; CHECK-NEXT:    ret
3702entry:
3703  tail call void @llvm.riscv.vsseg4.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i32 %vl)
3704  ret void
3705}
3706
3707declare void @llvm.riscv.vsseg2.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, float* , i32)
3708declare void @llvm.riscv.vsseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 8 x i1>, i32)
3709
3710define void @test_vsseg2_nxv8f32(<vscale x 8 x float> %val, float* %base, i32 %vl) {
3711; CHECK-LABEL: test_vsseg2_nxv8f32:
3712; CHECK:       # %bb.0: # %entry
3713; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
3714; CHECK-NEXT:    vmv4r.v v12, v8
3715; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
3716; CHECK-NEXT:    vsseg2e32.v v8, (a0)
3717; CHECK-NEXT:    ret
3718entry:
3719  tail call void @llvm.riscv.vsseg2.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, i32 %vl)
3720  ret void
3721}
3722
3723define void @test_vsseg2_mask_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i1> %mask, i32 %vl) {
3724; CHECK-LABEL: test_vsseg2_mask_nxv8f32:
3725; CHECK:       # %bb.0: # %entry
3726; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
3727; CHECK-NEXT:    vmv4r.v v12, v8
3728; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
3729; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
3730; CHECK-NEXT:    ret
3731entry:
3732  tail call void @llvm.riscv.vsseg2.mask.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i1> %mask, i32 %vl)
3733  ret void
3734}
3735
3736declare void @llvm.riscv.vsseg2.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, double* , i32)
3737declare void @llvm.riscv.vsseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i1>, i32)
3738
3739define void @test_vsseg2_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %vl) {
3740; CHECK-LABEL: test_vsseg2_nxv2f64:
3741; CHECK:       # %bb.0: # %entry
3742; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
3743; CHECK-NEXT:    vmv2r.v v10, v8
3744; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
3745; CHECK-NEXT:    vsseg2e64.v v8, (a0)
3746; CHECK-NEXT:    ret
3747entry:
3748  tail call void @llvm.riscv.vsseg2.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i32 %vl)
3749  ret void
3750}
3751
3752define void @test_vsseg2_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i32 %vl) {
3753; CHECK-LABEL: test_vsseg2_mask_nxv2f64:
3754; CHECK:       # %bb.0: # %entry
3755; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
3756; CHECK-NEXT:    vmv2r.v v10, v8
3757; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
3758; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
3759; CHECK-NEXT:    ret
3760entry:
3761  tail call void @llvm.riscv.vsseg2.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i32 %vl)
3762  ret void
3763}
3764
3765declare void @llvm.riscv.vsseg3.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double* , i32)
3766declare void @llvm.riscv.vsseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i1>, i32)
3767
3768define void @test_vsseg3_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %vl) {
3769; CHECK-LABEL: test_vsseg3_nxv2f64:
3770; CHECK:       # %bb.0: # %entry
3771; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
3772; CHECK-NEXT:    vmv2r.v v10, v8
3773; CHECK-NEXT:    vmv2r.v v12, v8
3774; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
3775; CHECK-NEXT:    vsseg3e64.v v8, (a0)
3776; CHECK-NEXT:    ret
3777entry:
3778  tail call void @llvm.riscv.vsseg3.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i32 %vl)
3779  ret void
3780}
3781
3782define void @test_vsseg3_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i32 %vl) {
3783; CHECK-LABEL: test_vsseg3_mask_nxv2f64:
3784; CHECK:       # %bb.0: # %entry
3785; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
3786; CHECK-NEXT:    vmv2r.v v10, v8
3787; CHECK-NEXT:    vmv2r.v v12, v8
3788; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
3789; CHECK-NEXT:    vsseg3e64.v v8, (a0), v0.t
3790; CHECK-NEXT:    ret
3791entry:
3792  tail call void @llvm.riscv.vsseg3.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i32 %vl)
3793  ret void
3794}
3795
3796declare void @llvm.riscv.vsseg4.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double* , i32)
3797declare void @llvm.riscv.vsseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i1>, i32)
3798
3799define void @test_vsseg4_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %vl) {
3800; CHECK-LABEL: test_vsseg4_nxv2f64:
3801; CHECK:       # %bb.0: # %entry
3802; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
3803; CHECK-NEXT:    vmv2r.v v10, v8
3804; CHECK-NEXT:    vmv2r.v v12, v8
3805; CHECK-NEXT:    vmv2r.v v14, v8
3806; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
3807; CHECK-NEXT:    vsseg4e64.v v8, (a0)
3808; CHECK-NEXT:    ret
3809entry:
3810  tail call void @llvm.riscv.vsseg4.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i32 %vl)
3811  ret void
3812}
3813
3814define void @test_vsseg4_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i32 %vl) {
3815; CHECK-LABEL: test_vsseg4_mask_nxv2f64:
3816; CHECK:       # %bb.0: # %entry
3817; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
3818; CHECK-NEXT:    vmv2r.v v10, v8
3819; CHECK-NEXT:    vmv2r.v v12, v8
3820; CHECK-NEXT:    vmv2r.v v14, v8
3821; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
3822; CHECK-NEXT:    vsseg4e64.v v8, (a0), v0.t
3823; CHECK-NEXT:    ret
3824entry:
3825  tail call void @llvm.riscv.vsseg4.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i32 %vl)
3826  ret void
3827}
3828
3829declare void @llvm.riscv.vsseg2.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half* , i32)
3830declare void @llvm.riscv.vsseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i32)
3831
3832define void @test_vsseg2_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %vl) {
3833; CHECK-LABEL: test_vsseg2_nxv4f16:
3834; CHECK:       # %bb.0: # %entry
3835; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
3836; CHECK-NEXT:    vmv1r.v v9, v8
3837; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
3838; CHECK-NEXT:    vsseg2e16.v v8, (a0)
3839; CHECK-NEXT:    ret
3840entry:
3841  tail call void @llvm.riscv.vsseg2.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %vl)
3842  ret void
3843}
3844
3845define void @test_vsseg2_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl) {
3846; CHECK-LABEL: test_vsseg2_mask_nxv4f16:
3847; CHECK:       # %bb.0: # %entry
3848; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
3849; CHECK-NEXT:    vmv1r.v v9, v8
3850; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
3851; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
3852; CHECK-NEXT:    ret
3853entry:
3854  tail call void @llvm.riscv.vsseg2.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl)
3855  ret void
3856}
3857
3858declare void @llvm.riscv.vsseg3.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half* , i32)
3859declare void @llvm.riscv.vsseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i32)
3860
3861define void @test_vsseg3_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %vl) {
3862; CHECK-LABEL: test_vsseg3_nxv4f16:
3863; CHECK:       # %bb.0: # %entry
3864; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
3865; CHECK-NEXT:    vmv1r.v v9, v8
3866; CHECK-NEXT:    vmv1r.v v10, v8
3867; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
3868; CHECK-NEXT:    vsseg3e16.v v8, (a0)
3869; CHECK-NEXT:    ret
3870entry:
3871  tail call void @llvm.riscv.vsseg3.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %vl)
3872  ret void
3873}
3874
3875define void @test_vsseg3_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl) {
3876; CHECK-LABEL: test_vsseg3_mask_nxv4f16:
3877; CHECK:       # %bb.0: # %entry
3878; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
3879; CHECK-NEXT:    vmv1r.v v9, v8
3880; CHECK-NEXT:    vmv1r.v v10, v8
3881; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
3882; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
3883; CHECK-NEXT:    ret
3884entry:
3885  tail call void @llvm.riscv.vsseg3.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl)
3886  ret void
3887}
3888
3889declare void @llvm.riscv.vsseg4.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half* , i32)
3890declare void @llvm.riscv.vsseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i32)
3891
3892define void @test_vsseg4_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %vl) {
3893; CHECK-LABEL: test_vsseg4_nxv4f16:
3894; CHECK:       # %bb.0: # %entry
3895; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
3896; CHECK-NEXT:    vmv1r.v v9, v8
3897; CHECK-NEXT:    vmv1r.v v10, v8
3898; CHECK-NEXT:    vmv1r.v v11, v8
3899; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
3900; CHECK-NEXT:    vsseg4e16.v v8, (a0)
3901; CHECK-NEXT:    ret
3902entry:
3903  tail call void @llvm.riscv.vsseg4.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %vl)
3904  ret void
3905}
3906
3907define void @test_vsseg4_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl) {
3908; CHECK-LABEL: test_vsseg4_mask_nxv4f16:
3909; CHECK:       # %bb.0: # %entry
3910; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
3911; CHECK-NEXT:    vmv1r.v v9, v8
3912; CHECK-NEXT:    vmv1r.v v10, v8
3913; CHECK-NEXT:    vmv1r.v v11, v8
3914; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
3915; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
3916; CHECK-NEXT:    ret
3917entry:
3918  tail call void @llvm.riscv.vsseg4.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl)
3919  ret void
3920}
3921
3922declare void @llvm.riscv.vsseg5.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half* , i32)
3923declare void @llvm.riscv.vsseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i32)
3924
3925define void @test_vsseg5_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %vl) {
3926; CHECK-LABEL: test_vsseg5_nxv4f16:
3927; CHECK:       # %bb.0: # %entry
3928; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
3929; CHECK-NEXT:    vmv1r.v v9, v8
3930; CHECK-NEXT:    vmv1r.v v10, v8
3931; CHECK-NEXT:    vmv1r.v v11, v8
3932; CHECK-NEXT:    vmv1r.v v12, v8
3933; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
3934; CHECK-NEXT:    vsseg5e16.v v8, (a0)
3935; CHECK-NEXT:    ret
3936entry:
3937  tail call void @llvm.riscv.vsseg5.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %vl)
3938  ret void
3939}
3940
3941define void @test_vsseg5_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl) {
3942; CHECK-LABEL: test_vsseg5_mask_nxv4f16:
3943; CHECK:       # %bb.0: # %entry
3944; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
3945; CHECK-NEXT:    vmv1r.v v9, v8
3946; CHECK-NEXT:    vmv1r.v v10, v8
3947; CHECK-NEXT:    vmv1r.v v11, v8
3948; CHECK-NEXT:    vmv1r.v v12, v8
3949; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
3950; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
3951; CHECK-NEXT:    ret
3952entry:
3953  tail call void @llvm.riscv.vsseg5.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl)
3954  ret void
3955}
3956
3957declare void @llvm.riscv.vsseg6.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half* , i32)
3958declare void @llvm.riscv.vsseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i32)
3959
3960define void @test_vsseg6_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %vl) {
3961; CHECK-LABEL: test_vsseg6_nxv4f16:
3962; CHECK:       # %bb.0: # %entry
3963; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
3964; CHECK-NEXT:    vmv1r.v v9, v8
3965; CHECK-NEXT:    vmv1r.v v10, v8
3966; CHECK-NEXT:    vmv1r.v v11, v8
3967; CHECK-NEXT:    vmv1r.v v12, v8
3968; CHECK-NEXT:    vmv1r.v v13, v8
3969; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
3970; CHECK-NEXT:    vsseg6e16.v v8, (a0)
3971; CHECK-NEXT:    ret
3972entry:
3973  tail call void @llvm.riscv.vsseg6.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %vl)
3974  ret void
3975}
3976
3977define void @test_vsseg6_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl) {
3978; CHECK-LABEL: test_vsseg6_mask_nxv4f16:
3979; CHECK:       # %bb.0: # %entry
3980; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
3981; CHECK-NEXT:    vmv1r.v v9, v8
3982; CHECK-NEXT:    vmv1r.v v10, v8
3983; CHECK-NEXT:    vmv1r.v v11, v8
3984; CHECK-NEXT:    vmv1r.v v12, v8
3985; CHECK-NEXT:    vmv1r.v v13, v8
3986; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
3987; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
3988; CHECK-NEXT:    ret
3989entry:
3990  tail call void @llvm.riscv.vsseg6.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl)
3991  ret void
3992}
3993
3994declare void @llvm.riscv.vsseg7.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half* , i32)
3995declare void @llvm.riscv.vsseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i32)
3996
3997define void @test_vsseg7_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %vl) {
3998; CHECK-LABEL: test_vsseg7_nxv4f16:
3999; CHECK:       # %bb.0: # %entry
4000; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
4001; CHECK-NEXT:    vmv1r.v v9, v8
4002; CHECK-NEXT:    vmv1r.v v10, v8
4003; CHECK-NEXT:    vmv1r.v v11, v8
4004; CHECK-NEXT:    vmv1r.v v12, v8
4005; CHECK-NEXT:    vmv1r.v v13, v8
4006; CHECK-NEXT:    vmv1r.v v14, v8
4007; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
4008; CHECK-NEXT:    vsseg7e16.v v8, (a0)
4009; CHECK-NEXT:    ret
4010entry:
4011  tail call void @llvm.riscv.vsseg7.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %vl)
4012  ret void
4013}
4014
4015define void @test_vsseg7_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl) {
4016; CHECK-LABEL: test_vsseg7_mask_nxv4f16:
4017; CHECK:       # %bb.0: # %entry
4018; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
4019; CHECK-NEXT:    vmv1r.v v9, v8
4020; CHECK-NEXT:    vmv1r.v v10, v8
4021; CHECK-NEXT:    vmv1r.v v11, v8
4022; CHECK-NEXT:    vmv1r.v v12, v8
4023; CHECK-NEXT:    vmv1r.v v13, v8
4024; CHECK-NEXT:    vmv1r.v v14, v8
4025; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
4026; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
4027; CHECK-NEXT:    ret
4028entry:
4029  tail call void @llvm.riscv.vsseg7.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl)
4030  ret void
4031}
4032
4033declare void @llvm.riscv.vsseg8.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half* , i32)
4034declare void @llvm.riscv.vsseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i32)
4035
4036define void @test_vsseg8_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %vl) {
4037; CHECK-LABEL: test_vsseg8_nxv4f16:
4038; CHECK:       # %bb.0: # %entry
4039; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
4040; CHECK-NEXT:    vmv1r.v v9, v8
4041; CHECK-NEXT:    vmv1r.v v10, v8
4042; CHECK-NEXT:    vmv1r.v v11, v8
4043; CHECK-NEXT:    vmv1r.v v12, v8
4044; CHECK-NEXT:    vmv1r.v v13, v8
4045; CHECK-NEXT:    vmv1r.v v14, v8
4046; CHECK-NEXT:    vmv1r.v v15, v8
4047; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
4048; CHECK-NEXT:    vsseg8e16.v v8, (a0)
4049; CHECK-NEXT:    ret
4050entry:
4051  tail call void @llvm.riscv.vsseg8.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %vl)
4052  ret void
4053}
4054
4055define void @test_vsseg8_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl) {
4056; CHECK-LABEL: test_vsseg8_mask_nxv4f16:
4057; CHECK:       # %bb.0: # %entry
4058; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
4059; CHECK-NEXT:    vmv1r.v v9, v8
4060; CHECK-NEXT:    vmv1r.v v10, v8
4061; CHECK-NEXT:    vmv1r.v v11, v8
4062; CHECK-NEXT:    vmv1r.v v12, v8
4063; CHECK-NEXT:    vmv1r.v v13, v8
4064; CHECK-NEXT:    vmv1r.v v14, v8
4065; CHECK-NEXT:    vmv1r.v v15, v8
4066; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
4067; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
4068; CHECK-NEXT:    ret
4069entry:
4070  tail call void @llvm.riscv.vsseg8.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl)
4071  ret void
4072}
4073
4074declare void @llvm.riscv.vsseg2.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half* , i32)
4075declare void @llvm.riscv.vsseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i32)
4076
4077define void @test_vsseg2_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %vl) {
4078; CHECK-LABEL: test_vsseg2_nxv2f16:
4079; CHECK:       # %bb.0: # %entry
4080; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
4081; CHECK-NEXT:    vmv1r.v v9, v8
4082; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
4083; CHECK-NEXT:    vsseg2e16.v v8, (a0)
4084; CHECK-NEXT:    ret
4085entry:
4086  tail call void @llvm.riscv.vsseg2.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %vl)
4087  ret void
4088}
4089
4090define void @test_vsseg2_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl) {
4091; CHECK-LABEL: test_vsseg2_mask_nxv2f16:
4092; CHECK:       # %bb.0: # %entry
4093; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
4094; CHECK-NEXT:    vmv1r.v v9, v8
4095; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
4096; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
4097; CHECK-NEXT:    ret
4098entry:
4099  tail call void @llvm.riscv.vsseg2.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl)
4100  ret void
4101}
4102
4103declare void @llvm.riscv.vsseg3.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half* , i32)
4104declare void @llvm.riscv.vsseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i32)
4105
4106define void @test_vsseg3_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %vl) {
4107; CHECK-LABEL: test_vsseg3_nxv2f16:
4108; CHECK:       # %bb.0: # %entry
4109; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
4110; CHECK-NEXT:    vmv1r.v v9, v8
4111; CHECK-NEXT:    vmv1r.v v10, v8
4112; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
4113; CHECK-NEXT:    vsseg3e16.v v8, (a0)
4114; CHECK-NEXT:    ret
4115entry:
4116  tail call void @llvm.riscv.vsseg3.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %vl)
4117  ret void
4118}
4119
4120define void @test_vsseg3_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl) {
4121; CHECK-LABEL: test_vsseg3_mask_nxv2f16:
4122; CHECK:       # %bb.0: # %entry
4123; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
4124; CHECK-NEXT:    vmv1r.v v9, v8
4125; CHECK-NEXT:    vmv1r.v v10, v8
4126; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
4127; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
4128; CHECK-NEXT:    ret
4129entry:
4130  tail call void @llvm.riscv.vsseg3.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl)
4131  ret void
4132}
4133
4134declare void @llvm.riscv.vsseg4.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half* , i32)
4135declare void @llvm.riscv.vsseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i32)
4136
4137define void @test_vsseg4_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %vl) {
4138; CHECK-LABEL: test_vsseg4_nxv2f16:
4139; CHECK:       # %bb.0: # %entry
4140; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
4141; CHECK-NEXT:    vmv1r.v v9, v8
4142; CHECK-NEXT:    vmv1r.v v10, v8
4143; CHECK-NEXT:    vmv1r.v v11, v8
4144; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
4145; CHECK-NEXT:    vsseg4e16.v v8, (a0)
4146; CHECK-NEXT:    ret
4147entry:
4148  tail call void @llvm.riscv.vsseg4.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %vl)
4149  ret void
4150}
4151
4152define void @test_vsseg4_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl) {
4153; CHECK-LABEL: test_vsseg4_mask_nxv2f16:
4154; CHECK:       # %bb.0: # %entry
4155; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
4156; CHECK-NEXT:    vmv1r.v v9, v8
4157; CHECK-NEXT:    vmv1r.v v10, v8
4158; CHECK-NEXT:    vmv1r.v v11, v8
4159; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
4160; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
4161; CHECK-NEXT:    ret
4162entry:
4163  tail call void @llvm.riscv.vsseg4.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl)
4164  ret void
4165}
4166
4167declare void @llvm.riscv.vsseg5.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half* , i32)
4168declare void @llvm.riscv.vsseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i32)
4169
4170define void @test_vsseg5_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %vl) {
4171; CHECK-LABEL: test_vsseg5_nxv2f16:
4172; CHECK:       # %bb.0: # %entry
4173; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
4174; CHECK-NEXT:    vmv1r.v v9, v8
4175; CHECK-NEXT:    vmv1r.v v10, v8
4176; CHECK-NEXT:    vmv1r.v v11, v8
4177; CHECK-NEXT:    vmv1r.v v12, v8
4178; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
4179; CHECK-NEXT:    vsseg5e16.v v8, (a0)
4180; CHECK-NEXT:    ret
4181entry:
4182  tail call void @llvm.riscv.vsseg5.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %vl)
4183  ret void
4184}
4185
4186define void @test_vsseg5_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl) {
4187; CHECK-LABEL: test_vsseg5_mask_nxv2f16:
4188; CHECK:       # %bb.0: # %entry
4189; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
4190; CHECK-NEXT:    vmv1r.v v9, v8
4191; CHECK-NEXT:    vmv1r.v v10, v8
4192; CHECK-NEXT:    vmv1r.v v11, v8
4193; CHECK-NEXT:    vmv1r.v v12, v8
4194; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
4195; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
4196; CHECK-NEXT:    ret
4197entry:
4198  tail call void @llvm.riscv.vsseg5.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl)
4199  ret void
4200}
4201
4202declare void @llvm.riscv.vsseg6.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half* , i32)
4203declare void @llvm.riscv.vsseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i32)
4204
4205define void @test_vsseg6_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %vl) {
4206; CHECK-LABEL: test_vsseg6_nxv2f16:
4207; CHECK:       # %bb.0: # %entry
4208; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
4209; CHECK-NEXT:    vmv1r.v v9, v8
4210; CHECK-NEXT:    vmv1r.v v10, v8
4211; CHECK-NEXT:    vmv1r.v v11, v8
4212; CHECK-NEXT:    vmv1r.v v12, v8
4213; CHECK-NEXT:    vmv1r.v v13, v8
4214; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
4215; CHECK-NEXT:    vsseg6e16.v v8, (a0)
4216; CHECK-NEXT:    ret
4217entry:
4218  tail call void @llvm.riscv.vsseg6.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %vl)
4219  ret void
4220}
4221
4222define void @test_vsseg6_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl) {
4223; CHECK-LABEL: test_vsseg6_mask_nxv2f16:
4224; CHECK:       # %bb.0: # %entry
4225; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
4226; CHECK-NEXT:    vmv1r.v v9, v8
4227; CHECK-NEXT:    vmv1r.v v10, v8
4228; CHECK-NEXT:    vmv1r.v v11, v8
4229; CHECK-NEXT:    vmv1r.v v12, v8
4230; CHECK-NEXT:    vmv1r.v v13, v8
4231; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
4232; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
4233; CHECK-NEXT:    ret
4234entry:
4235  tail call void @llvm.riscv.vsseg6.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl)
4236  ret void
4237}
4238
4239declare void @llvm.riscv.vsseg7.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half* , i32)
4240declare void @llvm.riscv.vsseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i32)
4241
4242define void @test_vsseg7_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %vl) {
4243; CHECK-LABEL: test_vsseg7_nxv2f16:
4244; CHECK:       # %bb.0: # %entry
4245; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
4246; CHECK-NEXT:    vmv1r.v v9, v8
4247; CHECK-NEXT:    vmv1r.v v10, v8
4248; CHECK-NEXT:    vmv1r.v v11, v8
4249; CHECK-NEXT:    vmv1r.v v12, v8
4250; CHECK-NEXT:    vmv1r.v v13, v8
4251; CHECK-NEXT:    vmv1r.v v14, v8
4252; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
4253; CHECK-NEXT:    vsseg7e16.v v8, (a0)
4254; CHECK-NEXT:    ret
4255entry:
4256  tail call void @llvm.riscv.vsseg7.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %vl)
4257  ret void
4258}
4259
4260define void @test_vsseg7_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl) {
4261; CHECK-LABEL: test_vsseg7_mask_nxv2f16:
4262; CHECK:       # %bb.0: # %entry
4263; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
4264; CHECK-NEXT:    vmv1r.v v9, v8
4265; CHECK-NEXT:    vmv1r.v v10, v8
4266; CHECK-NEXT:    vmv1r.v v11, v8
4267; CHECK-NEXT:    vmv1r.v v12, v8
4268; CHECK-NEXT:    vmv1r.v v13, v8
4269; CHECK-NEXT:    vmv1r.v v14, v8
4270; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
4271; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
4272; CHECK-NEXT:    ret
4273entry:
4274  tail call void @llvm.riscv.vsseg7.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl)
4275  ret void
4276}
4277
4278declare void @llvm.riscv.vsseg8.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half* , i32)
4279declare void @llvm.riscv.vsseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i32)
4280
4281define void @test_vsseg8_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %vl) {
4282; CHECK-LABEL: test_vsseg8_nxv2f16:
4283; CHECK:       # %bb.0: # %entry
4284; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
4285; CHECK-NEXT:    vmv1r.v v9, v8
4286; CHECK-NEXT:    vmv1r.v v10, v8
4287; CHECK-NEXT:    vmv1r.v v11, v8
4288; CHECK-NEXT:    vmv1r.v v12, v8
4289; CHECK-NEXT:    vmv1r.v v13, v8
4290; CHECK-NEXT:    vmv1r.v v14, v8
4291; CHECK-NEXT:    vmv1r.v v15, v8
4292; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
4293; CHECK-NEXT:    vsseg8e16.v v8, (a0)
4294; CHECK-NEXT:    ret
4295entry:
4296  tail call void @llvm.riscv.vsseg8.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %vl)
4297  ret void
4298}
4299
4300define void @test_vsseg8_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl) {
4301; CHECK-LABEL: test_vsseg8_mask_nxv2f16:
4302; CHECK:       # %bb.0: # %entry
4303; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
4304; CHECK-NEXT:    vmv1r.v v9, v8
4305; CHECK-NEXT:    vmv1r.v v10, v8
4306; CHECK-NEXT:    vmv1r.v v11, v8
4307; CHECK-NEXT:    vmv1r.v v12, v8
4308; CHECK-NEXT:    vmv1r.v v13, v8
4309; CHECK-NEXT:    vmv1r.v v14, v8
4310; CHECK-NEXT:    vmv1r.v v15, v8
4311; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
4312; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
4313; CHECK-NEXT:    ret
4314entry:
4315  tail call void @llvm.riscv.vsseg8.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl)
4316  ret void
4317}
4318
4319declare void @llvm.riscv.vsseg2.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float* , i32)
4320declare void @llvm.riscv.vsseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i32)
4321
4322define void @test_vsseg2_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %vl) {
4323; CHECK-LABEL: test_vsseg2_nxv4f32:
4324; CHECK:       # %bb.0: # %entry
4325; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
4326; CHECK-NEXT:    vmv2r.v v10, v8
4327; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
4328; CHECK-NEXT:    vsseg2e32.v v8, (a0)
4329; CHECK-NEXT:    ret
4330entry:
4331  tail call void @llvm.riscv.vsseg2.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i32 %vl)
4332  ret void
4333}
4334
4335define void @test_vsseg2_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i32 %vl) {
4336; CHECK-LABEL: test_vsseg2_mask_nxv4f32:
4337; CHECK:       # %bb.0: # %entry
4338; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
4339; CHECK-NEXT:    vmv2r.v v10, v8
4340; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
4341; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
4342; CHECK-NEXT:    ret
4343entry:
4344  tail call void @llvm.riscv.vsseg2.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i32 %vl)
4345  ret void
4346}
4347
4348declare void @llvm.riscv.vsseg3.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float* , i32)
4349declare void @llvm.riscv.vsseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i32)
4350
4351define void @test_vsseg3_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %vl) {
4352; CHECK-LABEL: test_vsseg3_nxv4f32:
4353; CHECK:       # %bb.0: # %entry
4354; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
4355; CHECK-NEXT:    vmv2r.v v10, v8
4356; CHECK-NEXT:    vmv2r.v v12, v8
4357; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
4358; CHECK-NEXT:    vsseg3e32.v v8, (a0)
4359; CHECK-NEXT:    ret
4360entry:
4361  tail call void @llvm.riscv.vsseg3.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i32 %vl)
4362  ret void
4363}
4364
4365define void @test_vsseg3_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i32 %vl) {
4366; CHECK-LABEL: test_vsseg3_mask_nxv4f32:
4367; CHECK:       # %bb.0: # %entry
4368; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
4369; CHECK-NEXT:    vmv2r.v v10, v8
4370; CHECK-NEXT:    vmv2r.v v12, v8
4371; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
4372; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
4373; CHECK-NEXT:    ret
4374entry:
4375  tail call void @llvm.riscv.vsseg3.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i32 %vl)
4376  ret void
4377}
4378
4379declare void @llvm.riscv.vsseg4.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float* , i32)
4380declare void @llvm.riscv.vsseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i32)
4381
4382define void @test_vsseg4_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %vl) {
4383; CHECK-LABEL: test_vsseg4_nxv4f32:
4384; CHECK:       # %bb.0: # %entry
4385; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
4386; CHECK-NEXT:    vmv2r.v v10, v8
4387; CHECK-NEXT:    vmv2r.v v12, v8
4388; CHECK-NEXT:    vmv2r.v v14, v8
4389; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
4390; CHECK-NEXT:    vsseg4e32.v v8, (a0)
4391; CHECK-NEXT:    ret
4392entry:
4393  tail call void @llvm.riscv.vsseg4.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i32 %vl)
4394  ret void
4395}
4396
4397define void @test_vsseg4_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i32 %vl) {
4398; CHECK-LABEL: test_vsseg4_mask_nxv4f32:
4399; CHECK:       # %bb.0: # %entry
4400; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
4401; CHECK-NEXT:    vmv2r.v v10, v8
4402; CHECK-NEXT:    vmv2r.v v12, v8
4403; CHECK-NEXT:    vmv2r.v v14, v8
4404; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
4405; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
4406; CHECK-NEXT:    ret
4407entry:
4408  tail call void @llvm.riscv.vsseg4.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i32 %vl)
4409  ret void
4410}
4411