// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
// RUN:   -target-feature +experimental-zfh -target-feature +experimental-v \
// RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \
// RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
9 
10 // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf8(
11 // CHECK-RV64-NEXT:  entry:
12 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
13 // CHECK-RV64-NEXT:    ret void
14 //
test_vsseg2e8_v_i8mf8(int8_t * base,vint8mf8_t v0,vint8mf8_t v1,size_t vl)15 void test_vsseg2e8_v_i8mf8 (int8_t *base, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
16   return vsseg2e8_v_i8mf8(base, v0, v1, vl);
17 }
18 
19 // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf8(
20 // CHECK-RV64-NEXT:  entry:
21 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
22 // CHECK-RV64-NEXT:    ret void
23 //
test_vsseg3e8_v_i8mf8(int8_t * base,vint8mf8_t v0,vint8mf8_t v1,vint8mf8_t v2,size_t vl)24 void test_vsseg3e8_v_i8mf8 (int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
25   return vsseg3e8_v_i8mf8(base, v0, v1, v2, vl);
26 }
27 
28 // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf8(
29 // CHECK-RV64-NEXT:  entry:
30 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
31 // CHECK-RV64-NEXT:    ret void
32 //
test_vsseg4e8_v_i8mf8(int8_t * base,vint8mf8_t v0,vint8mf8_t v1,vint8mf8_t v2,vint8mf8_t v3,size_t vl)33 void test_vsseg4e8_v_i8mf8 (int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
34   return vsseg4e8_v_i8mf8(base, v0, v1, v2, v3, vl);
35 }
36 
37 // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf8(
38 // CHECK-RV64-NEXT:  entry:
39 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
40 // CHECK-RV64-NEXT:    ret void
41 //
test_vsseg5e8_v_i8mf8(int8_t * base,vint8mf8_t v0,vint8mf8_t v1,vint8mf8_t v2,vint8mf8_t v3,vint8mf8_t v4,size_t vl)42 void test_vsseg5e8_v_i8mf8 (int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
43   return vsseg5e8_v_i8mf8(base, v0, v1, v2, v3, v4, vl);
44 }
45 
46 // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf8(
47 // CHECK-RV64-NEXT:  entry:
48 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
49 // CHECK-RV64-NEXT:    ret void
50 //
test_vsseg6e8_v_i8mf8(int8_t * base,vint8mf8_t v0,vint8mf8_t v1,vint8mf8_t v2,vint8mf8_t v3,vint8mf8_t v4,vint8mf8_t v5,size_t vl)51 void test_vsseg6e8_v_i8mf8 (int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
52   return vsseg6e8_v_i8mf8(base, v0, v1, v2, v3, v4, v5, vl);
53 }
54 
55 // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf8(
56 // CHECK-RV64-NEXT:  entry:
57 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
58 // CHECK-RV64-NEXT:    ret void
59 //
test_vsseg7e8_v_i8mf8(int8_t * base,vint8mf8_t v0,vint8mf8_t v1,vint8mf8_t v2,vint8mf8_t v3,vint8mf8_t v4,vint8mf8_t v5,vint8mf8_t v6,size_t vl)60 void test_vsseg7e8_v_i8mf8 (int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
61   return vsseg7e8_v_i8mf8(base, v0, v1, v2, v3, v4, v5, v6, vl);
62 }
63 
64 // CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf8(
65 // CHECK-RV64-NEXT:  entry:
66 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], <vscale x 1 x i8> [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
67 // CHECK-RV64-NEXT:    ret void
68 //
test_vsseg8e8_v_i8mf8(int8_t * base,vint8mf8_t v0,vint8mf8_t v1,vint8mf8_t v2,vint8mf8_t v3,vint8mf8_t v4,vint8mf8_t v5,vint8mf8_t v6,vint8mf8_t v7,size_t vl)69 void test_vsseg8e8_v_i8mf8 (int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
70   return vsseg8e8_v_i8mf8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
71 }
72 
73 // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf4(
74 // CHECK-RV64-NEXT:  entry:
75 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
76 // CHECK-RV64-NEXT:    ret void
77 //
test_vsseg2e8_v_i8mf4(int8_t * base,vint8mf4_t v0,vint8mf4_t v1,size_t vl)78 void test_vsseg2e8_v_i8mf4 (int8_t *base, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
79   return vsseg2e8_v_i8mf4(base, v0, v1, vl);
80 }
81 
82 // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf4(
83 // CHECK-RV64-NEXT:  entry:
84 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
85 // CHECK-RV64-NEXT:    ret void
86 //
test_vsseg3e8_v_i8mf4(int8_t * base,vint8mf4_t v0,vint8mf4_t v1,vint8mf4_t v2,size_t vl)87 void test_vsseg3e8_v_i8mf4 (int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
88   return vsseg3e8_v_i8mf4(base, v0, v1, v2, vl);
89 }
90 
91 // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf4(
92 // CHECK-RV64-NEXT:  entry:
93 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
94 // CHECK-RV64-NEXT:    ret void
95 //
test_vsseg4e8_v_i8mf4(int8_t * base,vint8mf4_t v0,vint8mf4_t v1,vint8mf4_t v2,vint8mf4_t v3,size_t vl)96 void test_vsseg4e8_v_i8mf4 (int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
97   return vsseg4e8_v_i8mf4(base, v0, v1, v2, v3, vl);
98 }
99 
100 // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf4(
101 // CHECK-RV64-NEXT:  entry:
102 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
103 // CHECK-RV64-NEXT:    ret void
104 //
test_vsseg5e8_v_i8mf4(int8_t * base,vint8mf4_t v0,vint8mf4_t v1,vint8mf4_t v2,vint8mf4_t v3,vint8mf4_t v4,size_t vl)105 void test_vsseg5e8_v_i8mf4 (int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
106   return vsseg5e8_v_i8mf4(base, v0, v1, v2, v3, v4, vl);
107 }
108 
109 // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf4(
110 // CHECK-RV64-NEXT:  entry:
111 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
112 // CHECK-RV64-NEXT:    ret void
113 //
test_vsseg6e8_v_i8mf4(int8_t * base,vint8mf4_t v0,vint8mf4_t v1,vint8mf4_t v2,vint8mf4_t v3,vint8mf4_t v4,vint8mf4_t v5,size_t vl)114 void test_vsseg6e8_v_i8mf4 (int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
115   return vsseg6e8_v_i8mf4(base, v0, v1, v2, v3, v4, v5, vl);
116 }
117 
118 // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf4(
119 // CHECK-RV64-NEXT:  entry:
120 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
121 // CHECK-RV64-NEXT:    ret void
122 //
test_vsseg7e8_v_i8mf4(int8_t * base,vint8mf4_t v0,vint8mf4_t v1,vint8mf4_t v2,vint8mf4_t v3,vint8mf4_t v4,vint8mf4_t v5,vint8mf4_t v6,size_t vl)123 void test_vsseg7e8_v_i8mf4 (int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
124   return vsseg7e8_v_i8mf4(base, v0, v1, v2, v3, v4, v5, v6, vl);
125 }
126 
127 // CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf4(
128 // CHECK-RV64-NEXT:  entry:
129 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], <vscale x 2 x i8> [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
130 // CHECK-RV64-NEXT:    ret void
131 //
test_vsseg8e8_v_i8mf4(int8_t * base,vint8mf4_t v0,vint8mf4_t v1,vint8mf4_t v2,vint8mf4_t v3,vint8mf4_t v4,vint8mf4_t v5,vint8mf4_t v6,vint8mf4_t v7,size_t vl)132 void test_vsseg8e8_v_i8mf4 (int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
133   return vsseg8e8_v_i8mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
134 }
135 
136 // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf2(
137 // CHECK-RV64-NEXT:  entry:
138 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
139 // CHECK-RV64-NEXT:    ret void
140 //
test_vsseg2e8_v_i8mf2(int8_t * base,vint8mf2_t v0,vint8mf2_t v1,size_t vl)141 void test_vsseg2e8_v_i8mf2 (int8_t *base, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
142   return vsseg2e8_v_i8mf2(base, v0, v1, vl);
143 }
144 
145 // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf2(
146 // CHECK-RV64-NEXT:  entry:
147 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
148 // CHECK-RV64-NEXT:    ret void
149 //
test_vsseg3e8_v_i8mf2(int8_t * base,vint8mf2_t v0,vint8mf2_t v1,vint8mf2_t v2,size_t vl)150 void test_vsseg3e8_v_i8mf2 (int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
151   return vsseg3e8_v_i8mf2(base, v0, v1, v2, vl);
152 }
153 
154 // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf2(
155 // CHECK-RV64-NEXT:  entry:
156 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
157 // CHECK-RV64-NEXT:    ret void
158 //
test_vsseg4e8_v_i8mf2(int8_t * base,vint8mf2_t v0,vint8mf2_t v1,vint8mf2_t v2,vint8mf2_t v3,size_t vl)159 void test_vsseg4e8_v_i8mf2 (int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
160   return vsseg4e8_v_i8mf2(base, v0, v1, v2, v3, vl);
161 }
162 
163 // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf2(
164 // CHECK-RV64-NEXT:  entry:
165 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
166 // CHECK-RV64-NEXT:    ret void
167 //
test_vsseg5e8_v_i8mf2(int8_t * base,vint8mf2_t v0,vint8mf2_t v1,vint8mf2_t v2,vint8mf2_t v3,vint8mf2_t v4,size_t vl)168 void test_vsseg5e8_v_i8mf2 (int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
169   return vsseg5e8_v_i8mf2(base, v0, v1, v2, v3, v4, vl);
170 }
171 
172 // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf2(
173 // CHECK-RV64-NEXT:  entry:
174 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
175 // CHECK-RV64-NEXT:    ret void
176 //
test_vsseg6e8_v_i8mf2(int8_t * base,vint8mf2_t v0,vint8mf2_t v1,vint8mf2_t v2,vint8mf2_t v3,vint8mf2_t v4,vint8mf2_t v5,size_t vl)177 void test_vsseg6e8_v_i8mf2 (int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
178   return vsseg6e8_v_i8mf2(base, v0, v1, v2, v3, v4, v5, vl);
179 }
180 
181 // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf2(
182 // CHECK-RV64-NEXT:  entry:
183 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
184 // CHECK-RV64-NEXT:    ret void
185 //
test_vsseg7e8_v_i8mf2(int8_t * base,vint8mf2_t v0,vint8mf2_t v1,vint8mf2_t v2,vint8mf2_t v3,vint8mf2_t v4,vint8mf2_t v5,vint8mf2_t v6,size_t vl)186 void test_vsseg7e8_v_i8mf2 (int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
187   return vsseg7e8_v_i8mf2(base, v0, v1, v2, v3, v4, v5, v6, vl);
188 }
189 
190 // CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf2(
191 // CHECK-RV64-NEXT:  entry:
192 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], <vscale x 4 x i8> [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
193 // CHECK-RV64-NEXT:    ret void
194 //
test_vsseg8e8_v_i8mf2(int8_t * base,vint8mf2_t v0,vint8mf2_t v1,vint8mf2_t v2,vint8mf2_t v3,vint8mf2_t v4,vint8mf2_t v5,vint8mf2_t v6,vint8mf2_t v7,size_t vl)195 void test_vsseg8e8_v_i8mf2 (int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
196   return vsseg8e8_v_i8mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
197 }
198 
199 // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m1(
200 // CHECK-RV64-NEXT:  entry:
201 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
202 // CHECK-RV64-NEXT:    ret void
203 //
test_vsseg2e8_v_i8m1(int8_t * base,vint8m1_t v0,vint8m1_t v1,size_t vl)204 void test_vsseg2e8_v_i8m1 (int8_t *base, vint8m1_t v0, vint8m1_t v1, size_t vl) {
205   return vsseg2e8_v_i8m1(base, v0, v1, vl);
206 }
207 
208 // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8m1(
209 // CHECK-RV64-NEXT:  entry:
210 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
211 // CHECK-RV64-NEXT:    ret void
212 //
test_vsseg3e8_v_i8m1(int8_t * base,vint8m1_t v0,vint8m1_t v1,vint8m1_t v2,size_t vl)213 void test_vsseg3e8_v_i8m1 (int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
214   return vsseg3e8_v_i8m1(base, v0, v1, v2, vl);
215 }
216 
217 // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8m1(
218 // CHECK-RV64-NEXT:  entry:
219 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
220 // CHECK-RV64-NEXT:    ret void
221 //
test_vsseg4e8_v_i8m1(int8_t * base,vint8m1_t v0,vint8m1_t v1,vint8m1_t v2,vint8m1_t v3,size_t vl)222 void test_vsseg4e8_v_i8m1 (int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
223   return vsseg4e8_v_i8m1(base, v0, v1, v2, v3, vl);
224 }
225 
226 // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8m1(
227 // CHECK-RV64-NEXT:  entry:
228 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
229 // CHECK-RV64-NEXT:    ret void
230 //
test_vsseg5e8_v_i8m1(int8_t * base,vint8m1_t v0,vint8m1_t v1,vint8m1_t v2,vint8m1_t v3,vint8m1_t v4,size_t vl)231 void test_vsseg5e8_v_i8m1 (int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
232   return vsseg5e8_v_i8m1(base, v0, v1, v2, v3, v4, vl);
233 }
234 
235 // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8m1(
236 // CHECK-RV64-NEXT:  entry:
237 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
238 // CHECK-RV64-NEXT:    ret void
239 //
test_vsseg6e8_v_i8m1(int8_t * base,vint8m1_t v0,vint8m1_t v1,vint8m1_t v2,vint8m1_t v3,vint8m1_t v4,vint8m1_t v5,size_t vl)240 void test_vsseg6e8_v_i8m1 (int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
241   return vsseg6e8_v_i8m1(base, v0, v1, v2, v3, v4, v5, vl);
242 }
243 
244 // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8m1(
245 // CHECK-RV64-NEXT:  entry:
246 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
247 // CHECK-RV64-NEXT:    ret void
248 //
test_vsseg7e8_v_i8m1(int8_t * base,vint8m1_t v0,vint8m1_t v1,vint8m1_t v2,vint8m1_t v3,vint8m1_t v4,vint8m1_t v5,vint8m1_t v6,size_t vl)249 void test_vsseg7e8_v_i8m1 (int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
250   return vsseg7e8_v_i8m1(base, v0, v1, v2, v3, v4, v5, v6, vl);
251 }
252 
253 // CHECK-RV64-LABEL: @test_vsseg8e8_v_i8m1(
254 // CHECK-RV64-NEXT:  entry:
255 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], <vscale x 8 x i8> [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
256 // CHECK-RV64-NEXT:    ret void
257 //
test_vsseg8e8_v_i8m1(int8_t * base,vint8m1_t v0,vint8m1_t v1,vint8m1_t v2,vint8m1_t v3,vint8m1_t v4,vint8m1_t v5,vint8m1_t v6,vint8m1_t v7,size_t vl)258 void test_vsseg8e8_v_i8m1 (int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
259   return vsseg8e8_v_i8m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
260 }
261 
262 // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m2(
263 // CHECK-RV64-NEXT:  entry:
264 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
265 // CHECK-RV64-NEXT:    ret void
266 //
test_vsseg2e8_v_i8m2(int8_t * base,vint8m2_t v0,vint8m2_t v1,size_t vl)267 void test_vsseg2e8_v_i8m2 (int8_t *base, vint8m2_t v0, vint8m2_t v1, size_t vl) {
268   return vsseg2e8_v_i8m2(base, v0, v1, vl);
269 }
270 
271 // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8m2(
272 // CHECK-RV64-NEXT:  entry:
273 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
274 // CHECK-RV64-NEXT:    ret void
275 //
test_vsseg3e8_v_i8m2(int8_t * base,vint8m2_t v0,vint8m2_t v1,vint8m2_t v2,size_t vl)276 void test_vsseg3e8_v_i8m2 (int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
277   return vsseg3e8_v_i8m2(base, v0, v1, v2, vl);
278 }
279 
280 // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8m2(
281 // CHECK-RV64-NEXT:  entry:
282 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], <vscale x 16 x i8> [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
283 // CHECK-RV64-NEXT:    ret void
284 //
test_vsseg4e8_v_i8m2(int8_t * base,vint8m2_t v0,vint8m2_t v1,vint8m2_t v2,vint8m2_t v3,size_t vl)285 void test_vsseg4e8_v_i8m2 (int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
286   return vsseg4e8_v_i8m2(base, v0, v1, v2, v3, vl);
287 }
288 
289 // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m4(
290 // CHECK-RV64-NEXT:  entry:
291 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv32i8.i64(<vscale x 32 x i8> [[V0:%.*]], <vscale x 32 x i8> [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
292 // CHECK-RV64-NEXT:    ret void
293 //
test_vsseg2e8_v_i8m4(int8_t * base,vint8m4_t v0,vint8m4_t v1,size_t vl)294 void test_vsseg2e8_v_i8m4 (int8_t *base, vint8m4_t v0, vint8m4_t v1, size_t vl) {
295   return vsseg2e8_v_i8m4(base, v0, v1, vl);
296 }
297 
298 // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16mf4(
299 // CHECK-RV64-NEXT:  entry:
300 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
301 // CHECK-RV64-NEXT:    ret void
302 //
test_vsseg2e16_v_i16mf4(int16_t * base,vint16mf4_t v0,vint16mf4_t v1,size_t vl)303 void test_vsseg2e16_v_i16mf4 (int16_t *base, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
304   return vsseg2e16_v_i16mf4(base, v0, v1, vl);
305 }
306 
307 // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16mf4(
308 // CHECK-RV64-NEXT:  entry:
309 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
310 // CHECK-RV64-NEXT:    ret void
311 //
test_vsseg3e16_v_i16mf4(int16_t * base,vint16mf4_t v0,vint16mf4_t v1,vint16mf4_t v2,size_t vl)312 void test_vsseg3e16_v_i16mf4 (int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
313   return vsseg3e16_v_i16mf4(base, v0, v1, v2, vl);
314 }
315 
316 // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16mf4(
317 // CHECK-RV64-NEXT:  entry:
318 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
319 // CHECK-RV64-NEXT:    ret void
320 //
test_vsseg4e16_v_i16mf4(int16_t * base,vint16mf4_t v0,vint16mf4_t v1,vint16mf4_t v2,vint16mf4_t v3,size_t vl)321 void test_vsseg4e16_v_i16mf4 (int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
322   return vsseg4e16_v_i16mf4(base, v0, v1, v2, v3, vl);
323 }
324 
325 // CHECK-RV64-LABEL: @test_vsseg5e16_v_i16mf4(
326 // CHECK-RV64-NEXT:  entry:
327 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
328 // CHECK-RV64-NEXT:    ret void
329 //
test_vsseg5e16_v_i16mf4(int16_t * base,vint16mf4_t v0,vint16mf4_t v1,vint16mf4_t v2,vint16mf4_t v3,vint16mf4_t v4,size_t vl)330 void test_vsseg5e16_v_i16mf4 (int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
331   return vsseg5e16_v_i16mf4(base, v0, v1, v2, v3, v4, vl);
332 }
333 
334 // CHECK-RV64-LABEL: @test_vsseg6e16_v_i16mf4(
335 // CHECK-RV64-NEXT:  entry:
336 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], <vscale x 1 x i16> [[V5:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
337 // CHECK-RV64-NEXT:    ret void
338 //
test_vsseg6e16_v_i16mf4(int16_t * base,vint16mf4_t v0,vint16mf4_t v1,vint16mf4_t v2,vint16mf4_t v3,vint16mf4_t v4,vint16mf4_t v5,size_t vl)339 void test_vsseg6e16_v_i16mf4 (int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
340   return vsseg6e16_v_i16mf4(base, v0, v1, v2, v3, v4, v5, vl);
341 }
342 
343 // CHECK-RV64-LABEL: @test_vsseg7e16_v_i16mf4(
344 // CHECK-RV64-NEXT:  entry:
345 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], <vscale x 1 x i16> [[V5:%.*]], <vscale x 1 x i16> [[V6:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
346 // CHECK-RV64-NEXT:    ret void
347 //
test_vsseg7e16_v_i16mf4(int16_t * base,vint16mf4_t v0,vint16mf4_t v1,vint16mf4_t v2,vint16mf4_t v3,vint16mf4_t v4,vint16mf4_t v5,vint16mf4_t v6,size_t vl)348 void test_vsseg7e16_v_i16mf4 (int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
349   return vsseg7e16_v_i16mf4(base, v0, v1, v2, v3, v4, v5, v6, vl);
350 }
351 
352 // CHECK-RV64-LABEL: @test_vsseg8e16_v_i16mf4(
353 // CHECK-RV64-NEXT:  entry:
354 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], <vscale x 1 x i16> [[V5:%.*]], <vscale x 1 x i16> [[V6:%.*]], <vscale x 1 x i16> [[V7:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
355 // CHECK-RV64-NEXT:    ret void
356 //
test_vsseg8e16_v_i16mf4(int16_t * base,vint16mf4_t v0,vint16mf4_t v1,vint16mf4_t v2,vint16mf4_t v3,vint16mf4_t v4,vint16mf4_t v5,vint16mf4_t v6,vint16mf4_t v7,size_t vl)357 void test_vsseg8e16_v_i16mf4 (int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
358   return vsseg8e16_v_i16mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
359 }
360 
361 // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16mf2(
362 // CHECK-RV64-NEXT:  entry:
363 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
364 // CHECK-RV64-NEXT:    ret void
365 //
test_vsseg2e16_v_i16mf2(int16_t * base,vint16mf2_t v0,vint16mf2_t v1,size_t vl)366 void test_vsseg2e16_v_i16mf2 (int16_t *base, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
367   return vsseg2e16_v_i16mf2(base, v0, v1, vl);
368 }
369 
370 // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16mf2(
371 // CHECK-RV64-NEXT:  entry:
372 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
373 // CHECK-RV64-NEXT:    ret void
374 //
test_vsseg3e16_v_i16mf2(int16_t * base,vint16mf2_t v0,vint16mf2_t v1,vint16mf2_t v2,size_t vl)375 void test_vsseg3e16_v_i16mf2 (int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
376   return vsseg3e16_v_i16mf2(base, v0, v1, v2, vl);
377 }
378 
379 // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16mf2(
380 // CHECK-RV64-NEXT:  entry:
381 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
382 // CHECK-RV64-NEXT:    ret void
383 //
test_vsseg4e16_v_i16mf2(int16_t * base,vint16mf2_t v0,vint16mf2_t v1,vint16mf2_t v2,vint16mf2_t v3,size_t vl)384 void test_vsseg4e16_v_i16mf2 (int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
385   return vsseg4e16_v_i16mf2(base, v0, v1, v2, v3, vl);
386 }
387 
388 // CHECK-RV64-LABEL: @test_vsseg5e16_v_i16mf2(
389 // CHECK-RV64-NEXT:  entry:
390 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
391 // CHECK-RV64-NEXT:    ret void
392 //
test_vsseg5e16_v_i16mf2(int16_t * base,vint16mf2_t v0,vint16mf2_t v1,vint16mf2_t v2,vint16mf2_t v3,vint16mf2_t v4,size_t vl)393 void test_vsseg5e16_v_i16mf2 (int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
394   return vsseg5e16_v_i16mf2(base, v0, v1, v2, v3, v4, vl);
395 }
396 
397 // CHECK-RV64-LABEL: @test_vsseg6e16_v_i16mf2(
398 // CHECK-RV64-NEXT:  entry:
399 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], <vscale x 2 x i16> [[V5:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
400 // CHECK-RV64-NEXT:    ret void
401 //
test_vsseg6e16_v_i16mf2(int16_t * base,vint16mf2_t v0,vint16mf2_t v1,vint16mf2_t v2,vint16mf2_t v3,vint16mf2_t v4,vint16mf2_t v5,size_t vl)402 void test_vsseg6e16_v_i16mf2 (int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
403   return vsseg6e16_v_i16mf2(base, v0, v1, v2, v3, v4, v5, vl);
404 }
405 
406 // CHECK-RV64-LABEL: @test_vsseg7e16_v_i16mf2(
407 // CHECK-RV64-NEXT:  entry:
408 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], <vscale x 2 x i16> [[V5:%.*]], <vscale x 2 x i16> [[V6:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
409 // CHECK-RV64-NEXT:    ret void
410 //
test_vsseg7e16_v_i16mf2(int16_t * base,vint16mf2_t v0,vint16mf2_t v1,vint16mf2_t v2,vint16mf2_t v3,vint16mf2_t v4,vint16mf2_t v5,vint16mf2_t v6,size_t vl)411 void test_vsseg7e16_v_i16mf2 (int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
412   return vsseg7e16_v_i16mf2(base, v0, v1, v2, v3, v4, v5, v6, vl);
413 }
414 
415 // CHECK-RV64-LABEL: @test_vsseg8e16_v_i16mf2(
416 // CHECK-RV64-NEXT:  entry:
417 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], <vscale x 2 x i16> [[V5:%.*]], <vscale x 2 x i16> [[V6:%.*]], <vscale x 2 x i16> [[V7:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
418 // CHECK-RV64-NEXT:    ret void
419 //
// Expected lowering: @llvm.riscv.vsseg8.nxv2i16.i64 (8-field segment store, i16mf2) — see CHECK-RV64 lines.
test_vsseg8e16_v_i16mf2(int16_t * base,vint16mf2_t v0,vint16mf2_t v1,vint16mf2_t v2,vint16mf2_t v3,vint16mf2_t v4,vint16mf2_t v5,vint16mf2_t v6,vint16mf2_t v7,size_t vl)420 void test_vsseg8e16_v_i16mf2 (int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
421   return vsseg8e16_v_i16mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
422 }
423 
424 // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m1(
425 // CHECK-RV64-NEXT:  entry:
426 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
427 // CHECK-RV64-NEXT:    ret void
428 //
// Expected lowering: @llvm.riscv.vsseg2.nxv4i16.i64 (2-field segment store, i16m1) — see CHECK-RV64 lines.
test_vsseg2e16_v_i16m1(int16_t * base,vint16m1_t v0,vint16m1_t v1,size_t vl)429 void test_vsseg2e16_v_i16m1 (int16_t *base, vint16m1_t v0, vint16m1_t v1, size_t vl) {
430   return vsseg2e16_v_i16m1(base, v0, v1, vl);
431 }
432 
433 // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16m1(
434 // CHECK-RV64-NEXT:  entry:
435 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
436 // CHECK-RV64-NEXT:    ret void
437 //
// Expected lowering: @llvm.riscv.vsseg3.nxv4i16.i64 (3-field segment store, i16m1) — see CHECK-RV64 lines.
test_vsseg3e16_v_i16m1(int16_t * base,vint16m1_t v0,vint16m1_t v1,vint16m1_t v2,size_t vl)438 void test_vsseg3e16_v_i16m1 (int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
439   return vsseg3e16_v_i16m1(base, v0, v1, v2, vl);
440 }
441 
442 // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16m1(
443 // CHECK-RV64-NEXT:  entry:
444 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
445 // CHECK-RV64-NEXT:    ret void
446 //
// Expected lowering: @llvm.riscv.vsseg4.nxv4i16.i64 (4-field segment store, i16m1) — see CHECK-RV64 lines.
test_vsseg4e16_v_i16m1(int16_t * base,vint16m1_t v0,vint16m1_t v1,vint16m1_t v2,vint16m1_t v3,size_t vl)447 void test_vsseg4e16_v_i16m1 (int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
448   return vsseg4e16_v_i16m1(base, v0, v1, v2, v3, vl);
449 }
450 
451 // CHECK-RV64-LABEL: @test_vsseg5e16_v_i16m1(
452 // CHECK-RV64-NEXT:  entry:
453 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
454 // CHECK-RV64-NEXT:    ret void
455 //
// Expected lowering: @llvm.riscv.vsseg5.nxv4i16.i64 (5-field segment store, i16m1) — see CHECK-RV64 lines.
test_vsseg5e16_v_i16m1(int16_t * base,vint16m1_t v0,vint16m1_t v1,vint16m1_t v2,vint16m1_t v3,vint16m1_t v4,size_t vl)456 void test_vsseg5e16_v_i16m1 (int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
457   return vsseg5e16_v_i16m1(base, v0, v1, v2, v3, v4, vl);
458 }
459 
460 // CHECK-RV64-LABEL: @test_vsseg6e16_v_i16m1(
461 // CHECK-RV64-NEXT:  entry:
462 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
463 // CHECK-RV64-NEXT:    ret void
464 //
// Expected lowering: @llvm.riscv.vsseg6.nxv4i16.i64 (6-field segment store, i16m1) — see CHECK-RV64 lines.
test_vsseg6e16_v_i16m1(int16_t * base,vint16m1_t v0,vint16m1_t v1,vint16m1_t v2,vint16m1_t v3,vint16m1_t v4,vint16m1_t v5,size_t vl)465 void test_vsseg6e16_v_i16m1 (int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
466   return vsseg6e16_v_i16m1(base, v0, v1, v2, v3, v4, v5, vl);
467 }
468 
469 // CHECK-RV64-LABEL: @test_vsseg7e16_v_i16m1(
470 // CHECK-RV64-NEXT:  entry:
471 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], <vscale x 4 x i16> [[V6:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
472 // CHECK-RV64-NEXT:    ret void
473 //
// Expected lowering: @llvm.riscv.vsseg7.nxv4i16.i64 (7-field segment store, i16m1) — see CHECK-RV64 lines.
test_vsseg7e16_v_i16m1(int16_t * base,vint16m1_t v0,vint16m1_t v1,vint16m1_t v2,vint16m1_t v3,vint16m1_t v4,vint16m1_t v5,vint16m1_t v6,size_t vl)474 void test_vsseg7e16_v_i16m1 (int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
475   return vsseg7e16_v_i16m1(base, v0, v1, v2, v3, v4, v5, v6, vl);
476 }
477 
478 // CHECK-RV64-LABEL: @test_vsseg8e16_v_i16m1(
479 // CHECK-RV64-NEXT:  entry:
480 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], <vscale x 4 x i16> [[V6:%.*]], <vscale x 4 x i16> [[V7:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
481 // CHECK-RV64-NEXT:    ret void
482 //
// Expected lowering: @llvm.riscv.vsseg8.nxv4i16.i64 (8-field segment store, i16m1) — see CHECK-RV64 lines.
test_vsseg8e16_v_i16m1(int16_t * base,vint16m1_t v0,vint16m1_t v1,vint16m1_t v2,vint16m1_t v3,vint16m1_t v4,vint16m1_t v5,vint16m1_t v6,vint16m1_t v7,size_t vl)483 void test_vsseg8e16_v_i16m1 (int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
484   return vsseg8e16_v_i16m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
485 }
486 
487 // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m2(
488 // CHECK-RV64-NEXT:  entry:
489 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv8i16.i64(<vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
490 // CHECK-RV64-NEXT:    ret void
491 //
// Expected lowering: @llvm.riscv.vsseg2.nxv8i16.i64 (2-field segment store, i16m2) — see CHECK-RV64 lines.
test_vsseg2e16_v_i16m2(int16_t * base,vint16m2_t v0,vint16m2_t v1,size_t vl)492 void test_vsseg2e16_v_i16m2 (int16_t *base, vint16m2_t v0, vint16m2_t v1, size_t vl) {
493   return vsseg2e16_v_i16m2(base, v0, v1, vl);
494 }
495 
496 // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16m2(
497 // CHECK-RV64-NEXT:  entry:
498 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv8i16.i64(<vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], <vscale x 8 x i16> [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
499 // CHECK-RV64-NEXT:    ret void
500 //
// Expected lowering: @llvm.riscv.vsseg3.nxv8i16.i64 (3-field segment store, i16m2) — see CHECK-RV64 lines.
test_vsseg3e16_v_i16m2(int16_t * base,vint16m2_t v0,vint16m2_t v1,vint16m2_t v2,size_t vl)501 void test_vsseg3e16_v_i16m2 (int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
502   return vsseg3e16_v_i16m2(base, v0, v1, v2, vl);
503 }
504 
505 // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16m2(
506 // CHECK-RV64-NEXT:  entry:
507 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv8i16.i64(<vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], <vscale x 8 x i16> [[V2:%.*]], <vscale x 8 x i16> [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
508 // CHECK-RV64-NEXT:    ret void
509 //
// Expected lowering: @llvm.riscv.vsseg4.nxv8i16.i64 (4-field segment store, i16m2; max fields for m2 is 4) — see CHECK-RV64 lines.
test_vsseg4e16_v_i16m2(int16_t * base,vint16m2_t v0,vint16m2_t v1,vint16m2_t v2,vint16m2_t v3,size_t vl)510 void test_vsseg4e16_v_i16m2 (int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
511   return vsseg4e16_v_i16m2(base, v0, v1, v2, v3, vl);
512 }
513 
514 // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m4(
515 // CHECK-RV64-NEXT:  entry:
516 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv16i16.i64(<vscale x 16 x i16> [[V0:%.*]], <vscale x 16 x i16> [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
517 // CHECK-RV64-NEXT:    ret void
518 //
// Expected lowering: @llvm.riscv.vsseg2.nxv16i16.i64 (2-field segment store, i16m4; max fields for m4 is 2) — see CHECK-RV64 lines.
test_vsseg2e16_v_i16m4(int16_t * base,vint16m4_t v0,vint16m4_t v1,size_t vl)519 void test_vsseg2e16_v_i16m4 (int16_t *base, vint16m4_t v0, vint16m4_t v1, size_t vl) {
520   return vsseg2e16_v_i16m4(base, v0, v1, vl);
521 }
522 
523 // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32mf2(
524 // CHECK-RV64-NEXT:  entry:
525 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
526 // CHECK-RV64-NEXT:    ret void
527 //
// Expected lowering: @llvm.riscv.vsseg2.nxv1i32.i64 (2-field segment store, i32mf2) — see CHECK-RV64 lines.
test_vsseg2e32_v_i32mf2(int32_t * base,vint32mf2_t v0,vint32mf2_t v1,size_t vl)528 void test_vsseg2e32_v_i32mf2 (int32_t *base, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
529   return vsseg2e32_v_i32mf2(base, v0, v1, vl);
530 }
531 
532 // CHECK-RV64-LABEL: @test_vsseg3e32_v_i32mf2(
533 // CHECK-RV64-NEXT:  entry:
534 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
535 // CHECK-RV64-NEXT:    ret void
536 //
// Expected lowering: @llvm.riscv.vsseg3.nxv1i32.i64 (3-field segment store, i32mf2) — see CHECK-RV64 lines.
test_vsseg3e32_v_i32mf2(int32_t * base,vint32mf2_t v0,vint32mf2_t v1,vint32mf2_t v2,size_t vl)537 void test_vsseg3e32_v_i32mf2 (int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
538   return vsseg3e32_v_i32mf2(base, v0, v1, v2, vl);
539 }
540 
541 // CHECK-RV64-LABEL: @test_vsseg4e32_v_i32mf2(
542 // CHECK-RV64-NEXT:  entry:
543 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
544 // CHECK-RV64-NEXT:    ret void
545 //
// Expected lowering: @llvm.riscv.vsseg4.nxv1i32.i64 (4-field segment store, i32mf2) — see CHECK-RV64 lines.
test_vsseg4e32_v_i32mf2(int32_t * base,vint32mf2_t v0,vint32mf2_t v1,vint32mf2_t v2,vint32mf2_t v3,size_t vl)546 void test_vsseg4e32_v_i32mf2 (int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
547   return vsseg4e32_v_i32mf2(base, v0, v1, v2, v3, vl);
548 }
549 
550 // CHECK-RV64-LABEL: @test_vsseg5e32_v_i32mf2(
551 // CHECK-RV64-NEXT:  entry:
552 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
553 // CHECK-RV64-NEXT:    ret void
554 //
// Expected lowering: @llvm.riscv.vsseg5.nxv1i32.i64 (5-field segment store, i32mf2) — see CHECK-RV64 lines.
test_vsseg5e32_v_i32mf2(int32_t * base,vint32mf2_t v0,vint32mf2_t v1,vint32mf2_t v2,vint32mf2_t v3,vint32mf2_t v4,size_t vl)555 void test_vsseg5e32_v_i32mf2 (int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
556   return vsseg5e32_v_i32mf2(base, v0, v1, v2, v3, v4, vl);
557 }
558 
559 // CHECK-RV64-LABEL: @test_vsseg6e32_v_i32mf2(
560 // CHECK-RV64-NEXT:  entry:
561 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
562 // CHECK-RV64-NEXT:    ret void
563 //
// Expected lowering: @llvm.riscv.vsseg6.nxv1i32.i64 (6-field segment store, i32mf2) — see CHECK-RV64 lines.
test_vsseg6e32_v_i32mf2(int32_t * base,vint32mf2_t v0,vint32mf2_t v1,vint32mf2_t v2,vint32mf2_t v3,vint32mf2_t v4,vint32mf2_t v5,size_t vl)564 void test_vsseg6e32_v_i32mf2 (int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
565   return vsseg6e32_v_i32mf2(base, v0, v1, v2, v3, v4, v5, vl);
566 }
567 
568 // CHECK-RV64-LABEL: @test_vsseg7e32_v_i32mf2(
569 // CHECK-RV64-NEXT:  entry:
570 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
571 // CHECK-RV64-NEXT:    ret void
572 //
// Expected lowering: @llvm.riscv.vsseg7.nxv1i32.i64 (7-field segment store, i32mf2) — see CHECK-RV64 lines.
test_vsseg7e32_v_i32mf2(int32_t * base,vint32mf2_t v0,vint32mf2_t v1,vint32mf2_t v2,vint32mf2_t v3,vint32mf2_t v4,vint32mf2_t v5,vint32mf2_t v6,size_t vl)573 void test_vsseg7e32_v_i32mf2 (int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
574   return vsseg7e32_v_i32mf2(base, v0, v1, v2, v3, v4, v5, v6, vl);
575 }
576 
577 // CHECK-RV64-LABEL: @test_vsseg8e32_v_i32mf2(
578 // CHECK-RV64-NEXT:  entry:
579 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], <vscale x 1 x i32> [[V7:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
580 // CHECK-RV64-NEXT:    ret void
581 //
// Expected lowering: @llvm.riscv.vsseg8.nxv1i32.i64 (8-field segment store, i32mf2) — see CHECK-RV64 lines.
test_vsseg8e32_v_i32mf2(int32_t * base,vint32mf2_t v0,vint32mf2_t v1,vint32mf2_t v2,vint32mf2_t v3,vint32mf2_t v4,vint32mf2_t v5,vint32mf2_t v6,vint32mf2_t v7,size_t vl)582 void test_vsseg8e32_v_i32mf2 (int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
583   return vsseg8e32_v_i32mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
584 }
585 
586 // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m1(
587 // CHECK-RV64-NEXT:  entry:
588 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
589 // CHECK-RV64-NEXT:    ret void
590 //
// Expected lowering: @llvm.riscv.vsseg2.nxv2i32.i64 (2-field segment store, i32m1) — see CHECK-RV64 lines.
test_vsseg2e32_v_i32m1(int32_t * base,vint32m1_t v0,vint32m1_t v1,size_t vl)591 void test_vsseg2e32_v_i32m1 (int32_t *base, vint32m1_t v0, vint32m1_t v1, size_t vl) {
592   return vsseg2e32_v_i32m1(base, v0, v1, vl);
593 }
594 
595 // CHECK-RV64-LABEL: @test_vsseg3e32_v_i32m1(
596 // CHECK-RV64-NEXT:  entry:
597 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
598 // CHECK-RV64-NEXT:    ret void
599 //
// Expected lowering: @llvm.riscv.vsseg3.nxv2i32.i64 (3-field segment store, i32m1) — see CHECK-RV64 lines.
test_vsseg3e32_v_i32m1(int32_t * base,vint32m1_t v0,vint32m1_t v1,vint32m1_t v2,size_t vl)600 void test_vsseg3e32_v_i32m1 (int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
601   return vsseg3e32_v_i32m1(base, v0, v1, v2, vl);
602 }
603 
604 // CHECK-RV64-LABEL: @test_vsseg4e32_v_i32m1(
605 // CHECK-RV64-NEXT:  entry:
606 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
607 // CHECK-RV64-NEXT:    ret void
608 //
// Expected lowering: @llvm.riscv.vsseg4.nxv2i32.i64 (4-field segment store, i32m1) — see CHECK-RV64 lines.
test_vsseg4e32_v_i32m1(int32_t * base,vint32m1_t v0,vint32m1_t v1,vint32m1_t v2,vint32m1_t v3,size_t vl)609 void test_vsseg4e32_v_i32m1 (int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
610   return vsseg4e32_v_i32m1(base, v0, v1, v2, v3, vl);
611 }
612 
613 // CHECK-RV64-LABEL: @test_vsseg5e32_v_i32m1(
614 // CHECK-RV64-NEXT:  entry:
615 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
616 // CHECK-RV64-NEXT:    ret void
617 //
// Expected lowering: @llvm.riscv.vsseg5.nxv2i32.i64 (5-field segment store, i32m1) — see CHECK-RV64 lines.
test_vsseg5e32_v_i32m1(int32_t * base,vint32m1_t v0,vint32m1_t v1,vint32m1_t v2,vint32m1_t v3,vint32m1_t v4,size_t vl)618 void test_vsseg5e32_v_i32m1 (int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
619   return vsseg5e32_v_i32m1(base, v0, v1, v2, v3, v4, vl);
620 }
621 
622 // CHECK-RV64-LABEL: @test_vsseg6e32_v_i32m1(
623 // CHECK-RV64-NEXT:  entry:
624 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
625 // CHECK-RV64-NEXT:    ret void
626 //
// Expected lowering: @llvm.riscv.vsseg6.nxv2i32.i64 (6-field segment store, i32m1) — see CHECK-RV64 lines.
test_vsseg6e32_v_i32m1(int32_t * base,vint32m1_t v0,vint32m1_t v1,vint32m1_t v2,vint32m1_t v3,vint32m1_t v4,vint32m1_t v5,size_t vl)627 void test_vsseg6e32_v_i32m1 (int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
628   return vsseg6e32_v_i32m1(base, v0, v1, v2, v3, v4, v5, vl);
629 }
630 
631 // CHECK-RV64-LABEL: @test_vsseg7e32_v_i32m1(
632 // CHECK-RV64-NEXT:  entry:
633 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
634 // CHECK-RV64-NEXT:    ret void
635 //
// Expected lowering: @llvm.riscv.vsseg7.nxv2i32.i64 (7-field segment store, i32m1) — see CHECK-RV64 lines.
test_vsseg7e32_v_i32m1(int32_t * base,vint32m1_t v0,vint32m1_t v1,vint32m1_t v2,vint32m1_t v3,vint32m1_t v4,vint32m1_t v5,vint32m1_t v6,size_t vl)636 void test_vsseg7e32_v_i32m1 (int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
637   return vsseg7e32_v_i32m1(base, v0, v1, v2, v3, v4, v5, v6, vl);
638 }
639 
640 // CHECK-RV64-LABEL: @test_vsseg8e32_v_i32m1(
641 // CHECK-RV64-NEXT:  entry:
642 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], <vscale x 2 x i32> [[V7:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
643 // CHECK-RV64-NEXT:    ret void
644 //
// Expected lowering: @llvm.riscv.vsseg8.nxv2i32.i64 (8-field segment store, i32m1) — see CHECK-RV64 lines.
test_vsseg8e32_v_i32m1(int32_t * base,vint32m1_t v0,vint32m1_t v1,vint32m1_t v2,vint32m1_t v3,vint32m1_t v4,vint32m1_t v5,vint32m1_t v6,vint32m1_t v7,size_t vl)645 void test_vsseg8e32_v_i32m1 (int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
646   return vsseg8e32_v_i32m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
647 }
648 
649 // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m2(
650 // CHECK-RV64-NEXT:  entry:
651 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
652 // CHECK-RV64-NEXT:    ret void
653 //
// Expected lowering: @llvm.riscv.vsseg2.nxv4i32.i64 (2-field segment store, i32m2) — see CHECK-RV64 lines.
test_vsseg2e32_v_i32m2(int32_t * base,vint32m2_t v0,vint32m2_t v1,size_t vl)654 void test_vsseg2e32_v_i32m2 (int32_t *base, vint32m2_t v0, vint32m2_t v1, size_t vl) {
655   return vsseg2e32_v_i32m2(base, v0, v1, vl);
656 }
657 
658 // CHECK-RV64-LABEL: @test_vsseg3e32_v_i32m2(
659 // CHECK-RV64-NEXT:  entry:
660 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
661 // CHECK-RV64-NEXT:    ret void
662 //
// Expected lowering: @llvm.riscv.vsseg3.nxv4i32.i64 (3-field segment store, i32m2) — see CHECK-RV64 lines.
test_vsseg3e32_v_i32m2(int32_t * base,vint32m2_t v0,vint32m2_t v1,vint32m2_t v2,size_t vl)663 void test_vsseg3e32_v_i32m2 (int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
664   return vsseg3e32_v_i32m2(base, v0, v1, v2, vl);
665 }
666 
667 // CHECK-RV64-LABEL: @test_vsseg4e32_v_i32m2(
668 // CHECK-RV64-NEXT:  entry:
669 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], <vscale x 4 x i32> [[V3:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
670 // CHECK-RV64-NEXT:    ret void
671 //
// Expected lowering: @llvm.riscv.vsseg4.nxv4i32.i64 (4-field segment store, i32m2; max fields for m2 is 4) — see CHECK-RV64 lines.
test_vsseg4e32_v_i32m2(int32_t * base,vint32m2_t v0,vint32m2_t v1,vint32m2_t v2,vint32m2_t v3,size_t vl)672 void test_vsseg4e32_v_i32m2 (int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
673   return vsseg4e32_v_i32m2(base, v0, v1, v2, v3, vl);
674 }
675 
676 // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m4(
677 // CHECK-RV64-NEXT:  entry:
678 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv8i32.i64(<vscale x 8 x i32> [[V0:%.*]], <vscale x 8 x i32> [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
679 // CHECK-RV64-NEXT:    ret void
680 //
// Expected lowering: @llvm.riscv.vsseg2.nxv8i32.i64 (2-field segment store, i32m4; max fields for m4 is 2) — see CHECK-RV64 lines.
test_vsseg2e32_v_i32m4(int32_t * base,vint32m4_t v0,vint32m4_t v1,size_t vl)681 void test_vsseg2e32_v_i32m4 (int32_t *base, vint32m4_t v0, vint32m4_t v1, size_t vl) {
682   return vsseg2e32_v_i32m4(base, v0, v1, vl);
683 }
684 
685 // CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m1(
686 // CHECK-RV64-NEXT:  entry:
687 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
688 // CHECK-RV64-NEXT:    ret void
689 //
// Expected lowering: @llvm.riscv.vsseg2.nxv1i64.i64 (2-field segment store, i64m1) — see CHECK-RV64 lines.
test_vsseg2e64_v_i64m1(int64_t * base,vint64m1_t v0,vint64m1_t v1,size_t vl)690 void test_vsseg2e64_v_i64m1 (int64_t *base, vint64m1_t v0, vint64m1_t v1, size_t vl) {
691   return vsseg2e64_v_i64m1(base, v0, v1, vl);
692 }
693 
694 // CHECK-RV64-LABEL: @test_vsseg3e64_v_i64m1(
695 // CHECK-RV64-NEXT:  entry:
696 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
697 // CHECK-RV64-NEXT:    ret void
698 //
// Expected lowering: @llvm.riscv.vsseg3.nxv1i64.i64 (3-field segment store, i64m1) — see CHECK-RV64 lines.
test_vsseg3e64_v_i64m1(int64_t * base,vint64m1_t v0,vint64m1_t v1,vint64m1_t v2,size_t vl)699 void test_vsseg3e64_v_i64m1 (int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
700   return vsseg3e64_v_i64m1(base, v0, v1, v2, vl);
701 }
702 
703 // CHECK-RV64-LABEL: @test_vsseg4e64_v_i64m1(
704 // CHECK-RV64-NEXT:  entry:
705 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
706 // CHECK-RV64-NEXT:    ret void
707 //
// Expected lowering: @llvm.riscv.vsseg4.nxv1i64.i64 (4-field segment store, i64m1) — see CHECK-RV64 lines.
test_vsseg4e64_v_i64m1(int64_t * base,vint64m1_t v0,vint64m1_t v1,vint64m1_t v2,vint64m1_t v3,size_t vl)708 void test_vsseg4e64_v_i64m1 (int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
709   return vsseg4e64_v_i64m1(base, v0, v1, v2, v3, vl);
710 }
711 
712 // CHECK-RV64-LABEL: @test_vsseg5e64_v_i64m1(
713 // CHECK-RV64-NEXT:  entry:
714 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
715 // CHECK-RV64-NEXT:    ret void
716 //
// Expected lowering: @llvm.riscv.vsseg5.nxv1i64.i64 (5-field segment store, i64m1) — see CHECK-RV64 lines.
test_vsseg5e64_v_i64m1(int64_t * base,vint64m1_t v0,vint64m1_t v1,vint64m1_t v2,vint64m1_t v3,vint64m1_t v4,size_t vl)717 void test_vsseg5e64_v_i64m1 (int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
718   return vsseg5e64_v_i64m1(base, v0, v1, v2, v3, v4, vl);
719 }
720 
721 // CHECK-RV64-LABEL: @test_vsseg6e64_v_i64m1(
722 // CHECK-RV64-NEXT:  entry:
723 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
724 // CHECK-RV64-NEXT:    ret void
725 //
// Expected lowering: @llvm.riscv.vsseg6.nxv1i64.i64 (6-field segment store, i64m1) — see CHECK-RV64 lines.
test_vsseg6e64_v_i64m1(int64_t * base,vint64m1_t v0,vint64m1_t v1,vint64m1_t v2,vint64m1_t v3,vint64m1_t v4,vint64m1_t v5,size_t vl)726 void test_vsseg6e64_v_i64m1 (int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
727   return vsseg6e64_v_i64m1(base, v0, v1, v2, v3, v4, v5, vl);
728 }
729 
730 // CHECK-RV64-LABEL: @test_vsseg7e64_v_i64m1(
731 // CHECK-RV64-NEXT:  entry:
732 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
733 // CHECK-RV64-NEXT:    ret void
734 //
// Expected lowering: @llvm.riscv.vsseg7.nxv1i64.i64 (7-field segment store, i64m1) — see CHECK-RV64 lines.
test_vsseg7e64_v_i64m1(int64_t * base,vint64m1_t v0,vint64m1_t v1,vint64m1_t v2,vint64m1_t v3,vint64m1_t v4,vint64m1_t v5,vint64m1_t v6,size_t vl)735 void test_vsseg7e64_v_i64m1 (int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
736   return vsseg7e64_v_i64m1(base, v0, v1, v2, v3, v4, v5, v6, vl);
737 }
738 
739 // CHECK-RV64-LABEL: @test_vsseg8e64_v_i64m1(
740 // CHECK-RV64-NEXT:  entry:
741 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], <vscale x 1 x i64> [[V7:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
742 // CHECK-RV64-NEXT:    ret void
743 //
// Expected lowering: @llvm.riscv.vsseg8.nxv1i64.i64 (8-field segment store, i64m1) — see CHECK-RV64 lines.
test_vsseg8e64_v_i64m1(int64_t * base,vint64m1_t v0,vint64m1_t v1,vint64m1_t v2,vint64m1_t v3,vint64m1_t v4,vint64m1_t v5,vint64m1_t v6,vint64m1_t v7,size_t vl)744 void test_vsseg8e64_v_i64m1 (int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
745   return vsseg8e64_v_i64m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
746 }
747 
748 // CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m2(
749 // CHECK-RV64-NEXT:  entry:
750 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv2i64.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
751 // CHECK-RV64-NEXT:    ret void
752 //
// Expected lowering: @llvm.riscv.vsseg2.nxv2i64.i64 (2-field segment store, i64m2) — see CHECK-RV64 lines.
test_vsseg2e64_v_i64m2(int64_t * base,vint64m2_t v0,vint64m2_t v1,size_t vl)753 void test_vsseg2e64_v_i64m2 (int64_t *base, vint64m2_t v0, vint64m2_t v1, size_t vl) {
754   return vsseg2e64_v_i64m2(base, v0, v1, vl);
755 }
756 
757 // CHECK-RV64-LABEL: @test_vsseg3e64_v_i64m2(
758 // CHECK-RV64-NEXT:  entry:
759 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv2i64.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
760 // CHECK-RV64-NEXT:    ret void
761 //
// Expected lowering: @llvm.riscv.vsseg3.nxv2i64.i64 (3-field segment store, i64m2) — see CHECK-RV64 lines.
test_vsseg3e64_v_i64m2(int64_t * base,vint64m2_t v0,vint64m2_t v1,vint64m2_t v2,size_t vl)762 void test_vsseg3e64_v_i64m2 (int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
763   return vsseg3e64_v_i64m2(base, v0, v1, v2, vl);
764 }
765 
766 // CHECK-RV64-LABEL: @test_vsseg4e64_v_i64m2(
767 // CHECK-RV64-NEXT:  entry:
768 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv2i64.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], <vscale x 2 x i64> [[V3:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
769 // CHECK-RV64-NEXT:    ret void
770 //
// Expected lowering: @llvm.riscv.vsseg4.nxv2i64.i64 (4-field segment store, i64m2; max fields for m2 is 4) — see CHECK-RV64 lines.
test_vsseg4e64_v_i64m2(int64_t * base,vint64m2_t v0,vint64m2_t v1,vint64m2_t v2,vint64m2_t v3,size_t vl)771 void test_vsseg4e64_v_i64m2 (int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
772   return vsseg4e64_v_i64m2(base, v0, v1, v2, v3, vl);
773 }
774 
775 // CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m4(
776 // CHECK-RV64-NEXT:  entry:
777 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv4i64.i64(<vscale x 4 x i64> [[V0:%.*]], <vscale x 4 x i64> [[V1:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
778 // CHECK-RV64-NEXT:    ret void
779 //
// Expected lowering: @llvm.riscv.vsseg2.nxv4i64.i64 (2-field segment store, i64m4; max fields for m4 is 2) — see CHECK-RV64 lines.
test_vsseg2e64_v_i64m4(int64_t * base,vint64m4_t v0,vint64m4_t v1,size_t vl)780 void test_vsseg2e64_v_i64m4 (int64_t *base, vint64m4_t v0, vint64m4_t v1, size_t vl) {
781   return vsseg2e64_v_i64m4(base, v0, v1, vl);
782 }
783 
784 // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf8(
785 // CHECK-RV64-NEXT:  entry:
786 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
787 // CHECK-RV64-NEXT:    ret void
788 //
// Expected lowering: @llvm.riscv.vsseg2.nxv1i8.i64 (2-field segment store, u8mf8; unsigned shares the signed IR intrinsic).
test_vsseg2e8_v_u8mf8(uint8_t * base,vuint8mf8_t v0,vuint8mf8_t v1,size_t vl)789 void test_vsseg2e8_v_u8mf8 (uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
790   return vsseg2e8_v_u8mf8(base, v0, v1, vl);
791 }
792 
793 // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf8(
794 // CHECK-RV64-NEXT:  entry:
795 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
796 // CHECK-RV64-NEXT:    ret void
797 //
// Expected lowering: @llvm.riscv.vsseg3.nxv1i8.i64 (3-field segment store, u8mf8; unsigned shares the signed IR intrinsic).
test_vsseg3e8_v_u8mf8(uint8_t * base,vuint8mf8_t v0,vuint8mf8_t v1,vuint8mf8_t v2,size_t vl)798 void test_vsseg3e8_v_u8mf8 (uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
799   return vsseg3e8_v_u8mf8(base, v0, v1, v2, vl);
800 }
801 
802 // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf8(
803 // CHECK-RV64-NEXT:  entry:
804 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
805 // CHECK-RV64-NEXT:    ret void
806 //
// Expected lowering: @llvm.riscv.vsseg4.nxv1i8.i64 (4-field segment store, u8mf8; unsigned shares the signed IR intrinsic).
test_vsseg4e8_v_u8mf8(uint8_t * base,vuint8mf8_t v0,vuint8mf8_t v1,vuint8mf8_t v2,vuint8mf8_t v3,size_t vl)807 void test_vsseg4e8_v_u8mf8 (uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
808   return vsseg4e8_v_u8mf8(base, v0, v1, v2, v3, vl);
809 }
810 
811 // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf8(
812 // CHECK-RV64-NEXT:  entry:
813 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
814 // CHECK-RV64-NEXT:    ret void
815 //
// Expected lowering: @llvm.riscv.vsseg5.nxv1i8.i64 (5-field segment store, u8mf8; unsigned shares the signed IR intrinsic).
test_vsseg5e8_v_u8mf8(uint8_t * base,vuint8mf8_t v0,vuint8mf8_t v1,vuint8mf8_t v2,vuint8mf8_t v3,vuint8mf8_t v4,size_t vl)816 void test_vsseg5e8_v_u8mf8 (uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
817   return vsseg5e8_v_u8mf8(base, v0, v1, v2, v3, v4, vl);
818 }
819 
820 // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf8(
821 // CHECK-RV64-NEXT:  entry:
822 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
823 // CHECK-RV64-NEXT:    ret void
824 //
// Expected lowering: @llvm.riscv.vsseg6.nxv1i8.i64 (6-field segment store, u8mf8; unsigned shares the signed IR intrinsic).
test_vsseg6e8_v_u8mf8(uint8_t * base,vuint8mf8_t v0,vuint8mf8_t v1,vuint8mf8_t v2,vuint8mf8_t v3,vuint8mf8_t v4,vuint8mf8_t v5,size_t vl)825 void test_vsseg6e8_v_u8mf8 (uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
826   return vsseg6e8_v_u8mf8(base, v0, v1, v2, v3, v4, v5, vl);
827 }
828 
829 // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf8(
830 // CHECK-RV64-NEXT:  entry:
831 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
832 // CHECK-RV64-NEXT:    ret void
833 //
// Expected lowering: @llvm.riscv.vsseg7.nxv1i8.i64 (7-field segment store, u8mf8; unsigned shares the signed IR intrinsic).
test_vsseg7e8_v_u8mf8(uint8_t * base,vuint8mf8_t v0,vuint8mf8_t v1,vuint8mf8_t v2,vuint8mf8_t v3,vuint8mf8_t v4,vuint8mf8_t v5,vuint8mf8_t v6,size_t vl)834 void test_vsseg7e8_v_u8mf8 (uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
835   return vsseg7e8_v_u8mf8(base, v0, v1, v2, v3, v4, v5, v6, vl);
836 }
837 
838 // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf8(
839 // CHECK-RV64-NEXT:  entry:
840 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], <vscale x 1 x i8> [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
841 // CHECK-RV64-NEXT:    ret void
842 //
// Expected lowering: @llvm.riscv.vsseg8.nxv1i8.i64 (8-field segment store, u8mf8; unsigned shares the signed IR intrinsic).
test_vsseg8e8_v_u8mf8(uint8_t * base,vuint8mf8_t v0,vuint8mf8_t v1,vuint8mf8_t v2,vuint8mf8_t v3,vuint8mf8_t v4,vuint8mf8_t v5,vuint8mf8_t v6,vuint8mf8_t v7,size_t vl)843 void test_vsseg8e8_v_u8mf8 (uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
844   return vsseg8e8_v_u8mf8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
845 }
846 
847 // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf4(
848 // CHECK-RV64-NEXT:  entry:
849 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
850 // CHECK-RV64-NEXT:    ret void
851 //
test_vsseg2e8_v_u8mf4(uint8_t * base,vuint8mf4_t v0,vuint8mf4_t v1,size_t vl)852 void test_vsseg2e8_v_u8mf4 (uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
853   return vsseg2e8_v_u8mf4(base, v0, v1, vl);
854 }
855 
856 // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf4(
857 // CHECK-RV64-NEXT:  entry:
858 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
859 // CHECK-RV64-NEXT:    ret void
860 //
test_vsseg3e8_v_u8mf4(uint8_t * base,vuint8mf4_t v0,vuint8mf4_t v1,vuint8mf4_t v2,size_t vl)861 void test_vsseg3e8_v_u8mf4 (uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
862   return vsseg3e8_v_u8mf4(base, v0, v1, v2, vl);
863 }
864 
865 // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf4(
866 // CHECK-RV64-NEXT:  entry:
867 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
868 // CHECK-RV64-NEXT:    ret void
869 //
test_vsseg4e8_v_u8mf4(uint8_t * base,vuint8mf4_t v0,vuint8mf4_t v1,vuint8mf4_t v2,vuint8mf4_t v3,size_t vl)870 void test_vsseg4e8_v_u8mf4 (uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
871   return vsseg4e8_v_u8mf4(base, v0, v1, v2, v3, vl);
872 }
873 
874 // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf4(
875 // CHECK-RV64-NEXT:  entry:
876 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
877 // CHECK-RV64-NEXT:    ret void
878 //
test_vsseg5e8_v_u8mf4(uint8_t * base,vuint8mf4_t v0,vuint8mf4_t v1,vuint8mf4_t v2,vuint8mf4_t v3,vuint8mf4_t v4,size_t vl)879 void test_vsseg5e8_v_u8mf4 (uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
880   return vsseg5e8_v_u8mf4(base, v0, v1, v2, v3, v4, vl);
881 }
882 
883 // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf4(
884 // CHECK-RV64-NEXT:  entry:
885 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
886 // CHECK-RV64-NEXT:    ret void
887 //
test_vsseg6e8_v_u8mf4(uint8_t * base,vuint8mf4_t v0,vuint8mf4_t v1,vuint8mf4_t v2,vuint8mf4_t v3,vuint8mf4_t v4,vuint8mf4_t v5,size_t vl)888 void test_vsseg6e8_v_u8mf4 (uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
889   return vsseg6e8_v_u8mf4(base, v0, v1, v2, v3, v4, v5, vl);
890 }
891 
892 // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf4(
893 // CHECK-RV64-NEXT:  entry:
894 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
895 // CHECK-RV64-NEXT:    ret void
896 //
test_vsseg7e8_v_u8mf4(uint8_t * base,vuint8mf4_t v0,vuint8mf4_t v1,vuint8mf4_t v2,vuint8mf4_t v3,vuint8mf4_t v4,vuint8mf4_t v5,vuint8mf4_t v6,size_t vl)897 void test_vsseg7e8_v_u8mf4 (uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
898   return vsseg7e8_v_u8mf4(base, v0, v1, v2, v3, v4, v5, v6, vl);
899 }
900 
901 // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf4(
902 // CHECK-RV64-NEXT:  entry:
903 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], <vscale x 2 x i8> [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
904 // CHECK-RV64-NEXT:    ret void
905 //
test_vsseg8e8_v_u8mf4(uint8_t * base,vuint8mf4_t v0,vuint8mf4_t v1,vuint8mf4_t v2,vuint8mf4_t v3,vuint8mf4_t v4,vuint8mf4_t v5,vuint8mf4_t v6,vuint8mf4_t v7,size_t vl)906 void test_vsseg8e8_v_u8mf4 (uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
907   return vsseg8e8_v_u8mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
908 }
909 
910 // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf2(
911 // CHECK-RV64-NEXT:  entry:
912 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
913 // CHECK-RV64-NEXT:    ret void
914 //
test_vsseg2e8_v_u8mf2(uint8_t * base,vuint8mf2_t v0,vuint8mf2_t v1,size_t vl)915 void test_vsseg2e8_v_u8mf2 (uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
916   return vsseg2e8_v_u8mf2(base, v0, v1, vl);
917 }
918 
919 // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf2(
920 // CHECK-RV64-NEXT:  entry:
921 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
922 // CHECK-RV64-NEXT:    ret void
923 //
test_vsseg3e8_v_u8mf2(uint8_t * base,vuint8mf2_t v0,vuint8mf2_t v1,vuint8mf2_t v2,size_t vl)924 void test_vsseg3e8_v_u8mf2 (uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
925   return vsseg3e8_v_u8mf2(base, v0, v1, v2, vl);
926 }
927 
928 // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf2(
929 // CHECK-RV64-NEXT:  entry:
930 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
931 // CHECK-RV64-NEXT:    ret void
932 //
test_vsseg4e8_v_u8mf2(uint8_t * base,vuint8mf2_t v0,vuint8mf2_t v1,vuint8mf2_t v2,vuint8mf2_t v3,size_t vl)933 void test_vsseg4e8_v_u8mf2 (uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
934   return vsseg4e8_v_u8mf2(base, v0, v1, v2, v3, vl);
935 }
936 
937 // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf2(
938 // CHECK-RV64-NEXT:  entry:
939 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
940 // CHECK-RV64-NEXT:    ret void
941 //
test_vsseg5e8_v_u8mf2(uint8_t * base,vuint8mf2_t v0,vuint8mf2_t v1,vuint8mf2_t v2,vuint8mf2_t v3,vuint8mf2_t v4,size_t vl)942 void test_vsseg5e8_v_u8mf2 (uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
943   return vsseg5e8_v_u8mf2(base, v0, v1, v2, v3, v4, vl);
944 }
945 
946 // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf2(
947 // CHECK-RV64-NEXT:  entry:
948 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
949 // CHECK-RV64-NEXT:    ret void
950 //
test_vsseg6e8_v_u8mf2(uint8_t * base,vuint8mf2_t v0,vuint8mf2_t v1,vuint8mf2_t v2,vuint8mf2_t v3,vuint8mf2_t v4,vuint8mf2_t v5,size_t vl)951 void test_vsseg6e8_v_u8mf2 (uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
952   return vsseg6e8_v_u8mf2(base, v0, v1, v2, v3, v4, v5, vl);
953 }
954 
955 // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf2(
956 // CHECK-RV64-NEXT:  entry:
957 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
958 // CHECK-RV64-NEXT:    ret void
959 //
test_vsseg7e8_v_u8mf2(uint8_t * base,vuint8mf2_t v0,vuint8mf2_t v1,vuint8mf2_t v2,vuint8mf2_t v3,vuint8mf2_t v4,vuint8mf2_t v5,vuint8mf2_t v6,size_t vl)960 void test_vsseg7e8_v_u8mf2 (uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
961   return vsseg7e8_v_u8mf2(base, v0, v1, v2, v3, v4, v5, v6, vl);
962 }
963 
964 // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf2(
965 // CHECK-RV64-NEXT:  entry:
966 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], <vscale x 4 x i8> [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
967 // CHECK-RV64-NEXT:    ret void
968 //
test_vsseg8e8_v_u8mf2(uint8_t * base,vuint8mf2_t v0,vuint8mf2_t v1,vuint8mf2_t v2,vuint8mf2_t v3,vuint8mf2_t v4,vuint8mf2_t v5,vuint8mf2_t v6,vuint8mf2_t v7,size_t vl)969 void test_vsseg8e8_v_u8mf2 (uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
970   return vsseg8e8_v_u8mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
971 }
972 
973 // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m1(
974 // CHECK-RV64-NEXT:  entry:
975 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
976 // CHECK-RV64-NEXT:    ret void
977 //
test_vsseg2e8_v_u8m1(uint8_t * base,vuint8m1_t v0,vuint8m1_t v1,size_t vl)978 void test_vsseg2e8_v_u8m1 (uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
979   return vsseg2e8_v_u8m1(base, v0, v1, vl);
980 }
981 
982 // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8m1(
983 // CHECK-RV64-NEXT:  entry:
984 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
985 // CHECK-RV64-NEXT:    ret void
986 //
test_vsseg3e8_v_u8m1(uint8_t * base,vuint8m1_t v0,vuint8m1_t v1,vuint8m1_t v2,size_t vl)987 void test_vsseg3e8_v_u8m1 (uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
988   return vsseg3e8_v_u8m1(base, v0, v1, v2, vl);
989 }
990 
991 // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8m1(
992 // CHECK-RV64-NEXT:  entry:
993 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
994 // CHECK-RV64-NEXT:    ret void
995 //
test_vsseg4e8_v_u8m1(uint8_t * base,vuint8m1_t v0,vuint8m1_t v1,vuint8m1_t v2,vuint8m1_t v3,size_t vl)996 void test_vsseg4e8_v_u8m1 (uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
997   return vsseg4e8_v_u8m1(base, v0, v1, v2, v3, vl);
998 }
999 
1000 // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8m1(
1001 // CHECK-RV64-NEXT:  entry:
1002 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
1003 // CHECK-RV64-NEXT:    ret void
1004 //
test_vsseg5e8_v_u8m1(uint8_t * base,vuint8m1_t v0,vuint8m1_t v1,vuint8m1_t v2,vuint8m1_t v3,vuint8m1_t v4,size_t vl)1005 void test_vsseg5e8_v_u8m1 (uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
1006   return vsseg5e8_v_u8m1(base, v0, v1, v2, v3, v4, vl);
1007 }
1008 
1009 // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8m1(
1010 // CHECK-RV64-NEXT:  entry:
1011 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
1012 // CHECK-RV64-NEXT:    ret void
1013 //
test_vsseg6e8_v_u8m1(uint8_t * base,vuint8m1_t v0,vuint8m1_t v1,vuint8m1_t v2,vuint8m1_t v3,vuint8m1_t v4,vuint8m1_t v5,size_t vl)1014 void test_vsseg6e8_v_u8m1 (uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
1015   return vsseg6e8_v_u8m1(base, v0, v1, v2, v3, v4, v5, vl);
1016 }
1017 
1018 // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8m1(
1019 // CHECK-RV64-NEXT:  entry:
1020 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
1021 // CHECK-RV64-NEXT:    ret void
1022 //
test_vsseg7e8_v_u8m1(uint8_t * base,vuint8m1_t v0,vuint8m1_t v1,vuint8m1_t v2,vuint8m1_t v3,vuint8m1_t v4,vuint8m1_t v5,vuint8m1_t v6,size_t vl)1023 void test_vsseg7e8_v_u8m1 (uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
1024   return vsseg7e8_v_u8m1(base, v0, v1, v2, v3, v4, v5, v6, vl);
1025 }
1026 
1027 // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8m1(
1028 // CHECK-RV64-NEXT:  entry:
1029 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], <vscale x 8 x i8> [[V7:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
1030 // CHECK-RV64-NEXT:    ret void
1031 //
test_vsseg8e8_v_u8m1(uint8_t * base,vuint8m1_t v0,vuint8m1_t v1,vuint8m1_t v2,vuint8m1_t v3,vuint8m1_t v4,vuint8m1_t v5,vuint8m1_t v6,vuint8m1_t v7,size_t vl)1032 void test_vsseg8e8_v_u8m1 (uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
1033   return vsseg8e8_v_u8m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
1034 }
1035 
1036 // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m2(
1037 // CHECK-RV64-NEXT:  entry:
1038 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
1039 // CHECK-RV64-NEXT:    ret void
1040 //
test_vsseg2e8_v_u8m2(uint8_t * base,vuint8m2_t v0,vuint8m2_t v1,size_t vl)1041 void test_vsseg2e8_v_u8m2 (uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
1042   return vsseg2e8_v_u8m2(base, v0, v1, vl);
1043 }
1044 
1045 // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8m2(
1046 // CHECK-RV64-NEXT:  entry:
1047 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
1048 // CHECK-RV64-NEXT:    ret void
1049 //
test_vsseg3e8_v_u8m2(uint8_t * base,vuint8m2_t v0,vuint8m2_t v1,vuint8m2_t v2,size_t vl)1050 void test_vsseg3e8_v_u8m2 (uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
1051   return vsseg3e8_v_u8m2(base, v0, v1, v2, vl);
1052 }
1053 
1054 // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8m2(
1055 // CHECK-RV64-NEXT:  entry:
1056 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], <vscale x 16 x i8> [[V3:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
1057 // CHECK-RV64-NEXT:    ret void
1058 //
test_vsseg4e8_v_u8m2(uint8_t * base,vuint8m2_t v0,vuint8m2_t v1,vuint8m2_t v2,vuint8m2_t v3,size_t vl)1059 void test_vsseg4e8_v_u8m2 (uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
1060   return vsseg4e8_v_u8m2(base, v0, v1, v2, v3, vl);
1061 }
1062 
1063 // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m4(
1064 // CHECK-RV64-NEXT:  entry:
1065 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv32i8.i64(<vscale x 32 x i8> [[V0:%.*]], <vscale x 32 x i8> [[V1:%.*]], i8* [[BASE:%.*]], i64 [[VL:%.*]])
1066 // CHECK-RV64-NEXT:    ret void
1067 //
test_vsseg2e8_v_u8m4(uint8_t * base,vuint8m4_t v0,vuint8m4_t v1,size_t vl)1068 void test_vsseg2e8_v_u8m4 (uint8_t *base, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
1069   return vsseg2e8_v_u8m4(base, v0, v1, vl);
1070 }
1071 
1072 // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16mf4(
1073 // CHECK-RV64-NEXT:  entry:
1074 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1075 // CHECK-RV64-NEXT:    ret void
1076 //
test_vsseg2e16_v_u16mf4(uint16_t * base,vuint16mf4_t v0,vuint16mf4_t v1,size_t vl)1077 void test_vsseg2e16_v_u16mf4 (uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
1078   return vsseg2e16_v_u16mf4(base, v0, v1, vl);
1079 }
1080 
1081 // CHECK-RV64-LABEL: @test_vsseg3e16_v_u16mf4(
1082 // CHECK-RV64-NEXT:  entry:
1083 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1084 // CHECK-RV64-NEXT:    ret void
1085 //
test_vsseg3e16_v_u16mf4(uint16_t * base,vuint16mf4_t v0,vuint16mf4_t v1,vuint16mf4_t v2,size_t vl)1086 void test_vsseg3e16_v_u16mf4 (uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
1087   return vsseg3e16_v_u16mf4(base, v0, v1, v2, vl);
1088 }
1089 
1090 // CHECK-RV64-LABEL: @test_vsseg4e16_v_u16mf4(
1091 // CHECK-RV64-NEXT:  entry:
1092 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1093 // CHECK-RV64-NEXT:    ret void
1094 //
test_vsseg4e16_v_u16mf4(uint16_t * base,vuint16mf4_t v0,vuint16mf4_t v1,vuint16mf4_t v2,vuint16mf4_t v3,size_t vl)1095 void test_vsseg4e16_v_u16mf4 (uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
1096   return vsseg4e16_v_u16mf4(base, v0, v1, v2, v3, vl);
1097 }
1098 
1099 // CHECK-RV64-LABEL: @test_vsseg5e16_v_u16mf4(
1100 // CHECK-RV64-NEXT:  entry:
1101 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1102 // CHECK-RV64-NEXT:    ret void
1103 //
test_vsseg5e16_v_u16mf4(uint16_t * base,vuint16mf4_t v0,vuint16mf4_t v1,vuint16mf4_t v2,vuint16mf4_t v3,vuint16mf4_t v4,size_t vl)1104 void test_vsseg5e16_v_u16mf4 (uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
1105   return vsseg5e16_v_u16mf4(base, v0, v1, v2, v3, v4, vl);
1106 }
1107 
1108 // CHECK-RV64-LABEL: @test_vsseg6e16_v_u16mf4(
1109 // CHECK-RV64-NEXT:  entry:
1110 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], <vscale x 1 x i16> [[V5:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1111 // CHECK-RV64-NEXT:    ret void
1112 //
test_vsseg6e16_v_u16mf4(uint16_t * base,vuint16mf4_t v0,vuint16mf4_t v1,vuint16mf4_t v2,vuint16mf4_t v3,vuint16mf4_t v4,vuint16mf4_t v5,size_t vl)1113 void test_vsseg6e16_v_u16mf4 (uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
1114   return vsseg6e16_v_u16mf4(base, v0, v1, v2, v3, v4, v5, vl);
1115 }
1116 
1117 // CHECK-RV64-LABEL: @test_vsseg7e16_v_u16mf4(
1118 // CHECK-RV64-NEXT:  entry:
1119 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], <vscale x 1 x i16> [[V5:%.*]], <vscale x 1 x i16> [[V6:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1120 // CHECK-RV64-NEXT:    ret void
1121 //
test_vsseg7e16_v_u16mf4(uint16_t * base,vuint16mf4_t v0,vuint16mf4_t v1,vuint16mf4_t v2,vuint16mf4_t v3,vuint16mf4_t v4,vuint16mf4_t v5,vuint16mf4_t v6,size_t vl)1122 void test_vsseg7e16_v_u16mf4 (uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
1123   return vsseg7e16_v_u16mf4(base, v0, v1, v2, v3, v4, v5, v6, vl);
1124 }
1125 
1126 // CHECK-RV64-LABEL: @test_vsseg8e16_v_u16mf4(
1127 // CHECK-RV64-NEXT:  entry:
1128 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], <vscale x 1 x i16> [[V5:%.*]], <vscale x 1 x i16> [[V6:%.*]], <vscale x 1 x i16> [[V7:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1129 // CHECK-RV64-NEXT:    ret void
1130 //
test_vsseg8e16_v_u16mf4(uint16_t * base,vuint16mf4_t v0,vuint16mf4_t v1,vuint16mf4_t v2,vuint16mf4_t v3,vuint16mf4_t v4,vuint16mf4_t v5,vuint16mf4_t v6,vuint16mf4_t v7,size_t vl)1131 void test_vsseg8e16_v_u16mf4 (uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
1132   return vsseg8e16_v_u16mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
1133 }
1134 
1135 // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16mf2(
1136 // CHECK-RV64-NEXT:  entry:
1137 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1138 // CHECK-RV64-NEXT:    ret void
1139 //
test_vsseg2e16_v_u16mf2(uint16_t * base,vuint16mf2_t v0,vuint16mf2_t v1,size_t vl)1140 void test_vsseg2e16_v_u16mf2 (uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
1141   return vsseg2e16_v_u16mf2(base, v0, v1, vl);
1142 }
1143 
1144 // CHECK-RV64-LABEL: @test_vsseg3e16_v_u16mf2(
1145 // CHECK-RV64-NEXT:  entry:
1146 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1147 // CHECK-RV64-NEXT:    ret void
1148 //
test_vsseg3e16_v_u16mf2(uint16_t * base,vuint16mf2_t v0,vuint16mf2_t v1,vuint16mf2_t v2,size_t vl)1149 void test_vsseg3e16_v_u16mf2 (uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
1150   return vsseg3e16_v_u16mf2(base, v0, v1, v2, vl);
1151 }
1152 
1153 // CHECK-RV64-LABEL: @test_vsseg4e16_v_u16mf2(
1154 // CHECK-RV64-NEXT:  entry:
1155 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1156 // CHECK-RV64-NEXT:    ret void
1157 //
test_vsseg4e16_v_u16mf2(uint16_t * base,vuint16mf2_t v0,vuint16mf2_t v1,vuint16mf2_t v2,vuint16mf2_t v3,size_t vl)1158 void test_vsseg4e16_v_u16mf2 (uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
1159   return vsseg4e16_v_u16mf2(base, v0, v1, v2, v3, vl);
1160 }
1161 
1162 // CHECK-RV64-LABEL: @test_vsseg5e16_v_u16mf2(
1163 // CHECK-RV64-NEXT:  entry:
1164 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1165 // CHECK-RV64-NEXT:    ret void
1166 //
test_vsseg5e16_v_u16mf2(uint16_t * base,vuint16mf2_t v0,vuint16mf2_t v1,vuint16mf2_t v2,vuint16mf2_t v3,vuint16mf2_t v4,size_t vl)1167 void test_vsseg5e16_v_u16mf2 (uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
1168   return vsseg5e16_v_u16mf2(base, v0, v1, v2, v3, v4, vl);
1169 }
1170 
1171 // CHECK-RV64-LABEL: @test_vsseg6e16_v_u16mf2(
1172 // CHECK-RV64-NEXT:  entry:
1173 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], <vscale x 2 x i16> [[V5:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1174 // CHECK-RV64-NEXT:    ret void
1175 //
test_vsseg6e16_v_u16mf2(uint16_t * base,vuint16mf2_t v0,vuint16mf2_t v1,vuint16mf2_t v2,vuint16mf2_t v3,vuint16mf2_t v4,vuint16mf2_t v5,size_t vl)1176 void test_vsseg6e16_v_u16mf2 (uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
1177   return vsseg6e16_v_u16mf2(base, v0, v1, v2, v3, v4, v5, vl);
1178 }
1179 
1180 // CHECK-RV64-LABEL: @test_vsseg7e16_v_u16mf2(
1181 // CHECK-RV64-NEXT:  entry:
1182 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], <vscale x 2 x i16> [[V5:%.*]], <vscale x 2 x i16> [[V6:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1183 // CHECK-RV64-NEXT:    ret void
1184 //
test_vsseg7e16_v_u16mf2(uint16_t * base,vuint16mf2_t v0,vuint16mf2_t v1,vuint16mf2_t v2,vuint16mf2_t v3,vuint16mf2_t v4,vuint16mf2_t v5,vuint16mf2_t v6,size_t vl)1185 void test_vsseg7e16_v_u16mf2 (uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
1186   return vsseg7e16_v_u16mf2(base, v0, v1, v2, v3, v4, v5, v6, vl);
1187 }
1188 
1189 // CHECK-RV64-LABEL: @test_vsseg8e16_v_u16mf2(
1190 // CHECK-RV64-NEXT:  entry:
1191 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], <vscale x 2 x i16> [[V5:%.*]], <vscale x 2 x i16> [[V6:%.*]], <vscale x 2 x i16> [[V7:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1192 // CHECK-RV64-NEXT:    ret void
1193 //
test_vsseg8e16_v_u16mf2(uint16_t * base,vuint16mf2_t v0,vuint16mf2_t v1,vuint16mf2_t v2,vuint16mf2_t v3,vuint16mf2_t v4,vuint16mf2_t v5,vuint16mf2_t v6,vuint16mf2_t v7,size_t vl)1194 void test_vsseg8e16_v_u16mf2 (uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
1195   return vsseg8e16_v_u16mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
1196 }
1197 
1198 // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m1(
1199 // CHECK-RV64-NEXT:  entry:
1200 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1201 // CHECK-RV64-NEXT:    ret void
1202 //
test_vsseg2e16_v_u16m1(uint16_t * base,vuint16m1_t v0,vuint16m1_t v1,size_t vl)1203 void test_vsseg2e16_v_u16m1 (uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
1204   return vsseg2e16_v_u16m1(base, v0, v1, vl);
1205 }
1206 
1207 // CHECK-RV64-LABEL: @test_vsseg3e16_v_u16m1(
1208 // CHECK-RV64-NEXT:  entry:
1209 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1210 // CHECK-RV64-NEXT:    ret void
1211 //
test_vsseg3e16_v_u16m1(uint16_t * base,vuint16m1_t v0,vuint16m1_t v1,vuint16m1_t v2,size_t vl)1212 void test_vsseg3e16_v_u16m1 (uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
1213   return vsseg3e16_v_u16m1(base, v0, v1, v2, vl);
1214 }
1215 
1216 // CHECK-RV64-LABEL: @test_vsseg4e16_v_u16m1(
1217 // CHECK-RV64-NEXT:  entry:
1218 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1219 // CHECK-RV64-NEXT:    ret void
1220 //
test_vsseg4e16_v_u16m1(uint16_t * base,vuint16m1_t v0,vuint16m1_t v1,vuint16m1_t v2,vuint16m1_t v3,size_t vl)1221 void test_vsseg4e16_v_u16m1 (uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
1222   return vsseg4e16_v_u16m1(base, v0, v1, v2, v3, vl);
1223 }
1224 
1225 // CHECK-RV64-LABEL: @test_vsseg5e16_v_u16m1(
1226 // CHECK-RV64-NEXT:  entry:
1227 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1228 // CHECK-RV64-NEXT:    ret void
1229 //
test_vsseg5e16_v_u16m1(uint16_t * base,vuint16m1_t v0,vuint16m1_t v1,vuint16m1_t v2,vuint16m1_t v3,vuint16m1_t v4,size_t vl)1230 void test_vsseg5e16_v_u16m1 (uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
1231   return vsseg5e16_v_u16m1(base, v0, v1, v2, v3, v4, vl);
1232 }
1233 
1234 // CHECK-RV64-LABEL: @test_vsseg6e16_v_u16m1(
1235 // CHECK-RV64-NEXT:  entry:
1236 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1237 // CHECK-RV64-NEXT:    ret void
1238 //
// Unit-stride segment stores, 6/7/8 fields of u16m1 (nxv4i16 per field).
// CHECK lines are autogenerated; do not edit them by hand.
void test_vsseg6e16_v_u16m1 (uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
  return vsseg6e16_v_u16m1(base, v0, v1, v2, v3, v4, v5, vl);
}

// CHECK-RV64-LABEL: @test_vsseg7e16_v_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], <vscale x 4 x i16> [[V6:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg7e16_v_u16m1 (uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
  return vsseg7e16_v_u16m1(base, v0, v1, v2, v3, v4, v5, v6, vl);
}

// CHECK-RV64-LABEL: @test_vsseg8e16_v_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], <vscale x 4 x i16> [[V6:%.*]], <vscale x 4 x i16> [[V7:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg8e16_v_u16m1 (uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
  return vsseg8e16_v_u16m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
1260 
1261 // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m2(
1262 // CHECK-RV64-NEXT:  entry:
1263 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv8i16.i64(<vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1264 // CHECK-RV64-NEXT:    ret void
1265 //
test_vsseg2e16_v_u16m2(uint16_t * base,vuint16m2_t v0,vuint16m2_t v1,size_t vl)1266 void test_vsseg2e16_v_u16m2 (uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
1267   return vsseg2e16_v_u16m2(base, v0, v1, vl);
1268 }
1269 
1270 // CHECK-RV64-LABEL: @test_vsseg3e16_v_u16m2(
1271 // CHECK-RV64-NEXT:  entry:
1272 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv8i16.i64(<vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], <vscale x 8 x i16> [[V2:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1273 // CHECK-RV64-NEXT:    ret void
1274 //
test_vsseg3e16_v_u16m2(uint16_t * base,vuint16m2_t v0,vuint16m2_t v1,vuint16m2_t v2,size_t vl)1275 void test_vsseg3e16_v_u16m2 (uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
1276   return vsseg3e16_v_u16m2(base, v0, v1, v2, vl);
1277 }
1278 
1279 // CHECK-RV64-LABEL: @test_vsseg4e16_v_u16m2(
1280 // CHECK-RV64-NEXT:  entry:
1281 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv8i16.i64(<vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], <vscale x 8 x i16> [[V2:%.*]], <vscale x 8 x i16> [[V3:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1282 // CHECK-RV64-NEXT:    ret void
1283 //
test_vsseg4e16_v_u16m2(uint16_t * base,vuint16m2_t v0,vuint16m2_t v1,vuint16m2_t v2,vuint16m2_t v3,size_t vl)1284 void test_vsseg4e16_v_u16m2 (uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
1285   return vsseg4e16_v_u16m2(base, v0, v1, v2, v3, vl);
1286 }
1287 
1288 // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m4(
1289 // CHECK-RV64-NEXT:  entry:
1290 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv16i16.i64(<vscale x 16 x i16> [[V0:%.*]], <vscale x 16 x i16> [[V1:%.*]], i16* [[BASE:%.*]], i64 [[VL:%.*]])
1291 // CHECK-RV64-NEXT:    ret void
1292 //
test_vsseg2e16_v_u16m4(uint16_t * base,vuint16m4_t v0,vuint16m4_t v1,size_t vl)1293 void test_vsseg2e16_v_u16m4 (uint16_t *base, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
1294   return vsseg2e16_v_u16m4(base, v0, v1, vl);
1295 }
1296 
1297 // CHECK-RV64-LABEL: @test_vsseg2e32_v_u32mf2(
1298 // CHECK-RV64-NEXT:  entry:
1299 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1300 // CHECK-RV64-NEXT:    ret void
1301 //
test_vsseg2e32_v_u32mf2(uint32_t * base,vuint32mf2_t v0,vuint32mf2_t v1,size_t vl)1302 void test_vsseg2e32_v_u32mf2 (uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
1303   return vsseg2e32_v_u32mf2(base, v0, v1, vl);
1304 }
1305 
1306 // CHECK-RV64-LABEL: @test_vsseg3e32_v_u32mf2(
1307 // CHECK-RV64-NEXT:  entry:
1308 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1309 // CHECK-RV64-NEXT:    ret void
1310 //
test_vsseg3e32_v_u32mf2(uint32_t * base,vuint32mf2_t v0,vuint32mf2_t v1,vuint32mf2_t v2,size_t vl)1311 void test_vsseg3e32_v_u32mf2 (uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
1312   return vsseg3e32_v_u32mf2(base, v0, v1, v2, vl);
1313 }
1314 
1315 // CHECK-RV64-LABEL: @test_vsseg4e32_v_u32mf2(
1316 // CHECK-RV64-NEXT:  entry:
1317 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1318 // CHECK-RV64-NEXT:    ret void
1319 //
test_vsseg4e32_v_u32mf2(uint32_t * base,vuint32mf2_t v0,vuint32mf2_t v1,vuint32mf2_t v2,vuint32mf2_t v3,size_t vl)1320 void test_vsseg4e32_v_u32mf2 (uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
1321   return vsseg4e32_v_u32mf2(base, v0, v1, v2, v3, vl);
1322 }
1323 
1324 // CHECK-RV64-LABEL: @test_vsseg5e32_v_u32mf2(
1325 // CHECK-RV64-NEXT:  entry:
1326 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1327 // CHECK-RV64-NEXT:    ret void
1328 //
test_vsseg5e32_v_u32mf2(uint32_t * base,vuint32mf2_t v0,vuint32mf2_t v1,vuint32mf2_t v2,vuint32mf2_t v3,vuint32mf2_t v4,size_t vl)1329 void test_vsseg5e32_v_u32mf2 (uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
1330   return vsseg5e32_v_u32mf2(base, v0, v1, v2, v3, v4, vl);
1331 }
1332 
1333 // CHECK-RV64-LABEL: @test_vsseg6e32_v_u32mf2(
1334 // CHECK-RV64-NEXT:  entry:
1335 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1336 // CHECK-RV64-NEXT:    ret void
1337 //
test_vsseg6e32_v_u32mf2(uint32_t * base,vuint32mf2_t v0,vuint32mf2_t v1,vuint32mf2_t v2,vuint32mf2_t v3,vuint32mf2_t v4,vuint32mf2_t v5,size_t vl)1338 void test_vsseg6e32_v_u32mf2 (uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
1339   return vsseg6e32_v_u32mf2(base, v0, v1, v2, v3, v4, v5, vl);
1340 }
1341 
1342 // CHECK-RV64-LABEL: @test_vsseg7e32_v_u32mf2(
1343 // CHECK-RV64-NEXT:  entry:
1344 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1345 // CHECK-RV64-NEXT:    ret void
1346 //
test_vsseg7e32_v_u32mf2(uint32_t * base,vuint32mf2_t v0,vuint32mf2_t v1,vuint32mf2_t v2,vuint32mf2_t v3,vuint32mf2_t v4,vuint32mf2_t v5,vuint32mf2_t v6,size_t vl)1347 void test_vsseg7e32_v_u32mf2 (uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
1348   return vsseg7e32_v_u32mf2(base, v0, v1, v2, v3, v4, v5, v6, vl);
1349 }
1350 
1351 // CHECK-RV64-LABEL: @test_vsseg8e32_v_u32mf2(
1352 // CHECK-RV64-NEXT:  entry:
1353 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], <vscale x 1 x i32> [[V7:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1354 // CHECK-RV64-NEXT:    ret void
1355 //
test_vsseg8e32_v_u32mf2(uint32_t * base,vuint32mf2_t v0,vuint32mf2_t v1,vuint32mf2_t v2,vuint32mf2_t v3,vuint32mf2_t v4,vuint32mf2_t v5,vuint32mf2_t v6,vuint32mf2_t v7,size_t vl)1356 void test_vsseg8e32_v_u32mf2 (uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
1357   return vsseg8e32_v_u32mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
1358 }
1359 
1360 // CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m1(
1361 // CHECK-RV64-NEXT:  entry:
1362 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1363 // CHECK-RV64-NEXT:    ret void
1364 //
test_vsseg2e32_v_u32m1(uint32_t * base,vuint32m1_t v0,vuint32m1_t v1,size_t vl)1365 void test_vsseg2e32_v_u32m1 (uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
1366   return vsseg2e32_v_u32m1(base, v0, v1, vl);
1367 }
1368 
1369 // CHECK-RV64-LABEL: @test_vsseg3e32_v_u32m1(
1370 // CHECK-RV64-NEXT:  entry:
1371 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1372 // CHECK-RV64-NEXT:    ret void
1373 //
test_vsseg3e32_v_u32m1(uint32_t * base,vuint32m1_t v0,vuint32m1_t v1,vuint32m1_t v2,size_t vl)1374 void test_vsseg3e32_v_u32m1 (uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
1375   return vsseg3e32_v_u32m1(base, v0, v1, v2, vl);
1376 }
1377 
1378 // CHECK-RV64-LABEL: @test_vsseg4e32_v_u32m1(
1379 // CHECK-RV64-NEXT:  entry:
1380 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1381 // CHECK-RV64-NEXT:    ret void
1382 //
test_vsseg4e32_v_u32m1(uint32_t * base,vuint32m1_t v0,vuint32m1_t v1,vuint32m1_t v2,vuint32m1_t v3,size_t vl)1383 void test_vsseg4e32_v_u32m1 (uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
1384   return vsseg4e32_v_u32m1(base, v0, v1, v2, v3, vl);
1385 }
1386 
1387 // CHECK-RV64-LABEL: @test_vsseg5e32_v_u32m1(
1388 // CHECK-RV64-NEXT:  entry:
1389 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1390 // CHECK-RV64-NEXT:    ret void
1391 //
test_vsseg5e32_v_u32m1(uint32_t * base,vuint32m1_t v0,vuint32m1_t v1,vuint32m1_t v2,vuint32m1_t v3,vuint32m1_t v4,size_t vl)1392 void test_vsseg5e32_v_u32m1 (uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
1393   return vsseg5e32_v_u32m1(base, v0, v1, v2, v3, v4, vl);
1394 }
1395 
1396 // CHECK-RV64-LABEL: @test_vsseg6e32_v_u32m1(
1397 // CHECK-RV64-NEXT:  entry:
1398 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1399 // CHECK-RV64-NEXT:    ret void
1400 //
test_vsseg6e32_v_u32m1(uint32_t * base,vuint32m1_t v0,vuint32m1_t v1,vuint32m1_t v2,vuint32m1_t v3,vuint32m1_t v4,vuint32m1_t v5,size_t vl)1401 void test_vsseg6e32_v_u32m1 (uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
1402   return vsseg6e32_v_u32m1(base, v0, v1, v2, v3, v4, v5, vl);
1403 }
1404 
1405 // CHECK-RV64-LABEL: @test_vsseg7e32_v_u32m1(
1406 // CHECK-RV64-NEXT:  entry:
1407 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1408 // CHECK-RV64-NEXT:    ret void
1409 //
test_vsseg7e32_v_u32m1(uint32_t * base,vuint32m1_t v0,vuint32m1_t v1,vuint32m1_t v2,vuint32m1_t v3,vuint32m1_t v4,vuint32m1_t v5,vuint32m1_t v6,size_t vl)1410 void test_vsseg7e32_v_u32m1 (uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
1411   return vsseg7e32_v_u32m1(base, v0, v1, v2, v3, v4, v5, v6, vl);
1412 }
1413 
1414 // CHECK-RV64-LABEL: @test_vsseg8e32_v_u32m1(
1415 // CHECK-RV64-NEXT:  entry:
1416 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], <vscale x 2 x i32> [[V7:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1417 // CHECK-RV64-NEXT:    ret void
1418 //
test_vsseg8e32_v_u32m1(uint32_t * base,vuint32m1_t v0,vuint32m1_t v1,vuint32m1_t v2,vuint32m1_t v3,vuint32m1_t v4,vuint32m1_t v5,vuint32m1_t v6,vuint32m1_t v7,size_t vl)1419 void test_vsseg8e32_v_u32m1 (uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
1420   return vsseg8e32_v_u32m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
1421 }
1422 
1423 // CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m2(
1424 // CHECK-RV64-NEXT:  entry:
1425 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1426 // CHECK-RV64-NEXT:    ret void
1427 //
test_vsseg2e32_v_u32m2(uint32_t * base,vuint32m2_t v0,vuint32m2_t v1,size_t vl)1428 void test_vsseg2e32_v_u32m2 (uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
1429   return vsseg2e32_v_u32m2(base, v0, v1, vl);
1430 }
1431 
1432 // CHECK-RV64-LABEL: @test_vsseg3e32_v_u32m2(
1433 // CHECK-RV64-NEXT:  entry:
1434 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1435 // CHECK-RV64-NEXT:    ret void
1436 //
test_vsseg3e32_v_u32m2(uint32_t * base,vuint32m2_t v0,vuint32m2_t v1,vuint32m2_t v2,size_t vl)1437 void test_vsseg3e32_v_u32m2 (uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
1438   return vsseg3e32_v_u32m2(base, v0, v1, v2, vl);
1439 }
1440 
1441 // CHECK-RV64-LABEL: @test_vsseg4e32_v_u32m2(
1442 // CHECK-RV64-NEXT:  entry:
1443 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], <vscale x 4 x i32> [[V3:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1444 // CHECK-RV64-NEXT:    ret void
1445 //
test_vsseg4e32_v_u32m2(uint32_t * base,vuint32m2_t v0,vuint32m2_t v1,vuint32m2_t v2,vuint32m2_t v3,size_t vl)1446 void test_vsseg4e32_v_u32m2 (uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
1447   return vsseg4e32_v_u32m2(base, v0, v1, v2, v3, vl);
1448 }
1449 
1450 // CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m4(
1451 // CHECK-RV64-NEXT:  entry:
1452 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv8i32.i64(<vscale x 8 x i32> [[V0:%.*]], <vscale x 8 x i32> [[V1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
1453 // CHECK-RV64-NEXT:    ret void
1454 //
test_vsseg2e32_v_u32m4(uint32_t * base,vuint32m4_t v0,vuint32m4_t v1,size_t vl)1455 void test_vsseg2e32_v_u32m4 (uint32_t *base, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
1456   return vsseg2e32_v_u32m4(base, v0, v1, vl);
1457 }
1458 
1459 // CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m1(
1460 // CHECK-RV64-NEXT:  entry:
1461 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
1462 // CHECK-RV64-NEXT:    ret void
1463 //
test_vsseg2e64_v_u64m1(uint64_t * base,vuint64m1_t v0,vuint64m1_t v1,size_t vl)1464 void test_vsseg2e64_v_u64m1 (uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
1465   return vsseg2e64_v_u64m1(base, v0, v1, vl);
1466 }
1467 
1468 // CHECK-RV64-LABEL: @test_vsseg3e64_v_u64m1(
1469 // CHECK-RV64-NEXT:  entry:
1470 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
1471 // CHECK-RV64-NEXT:    ret void
1472 //
test_vsseg3e64_v_u64m1(uint64_t * base,vuint64m1_t v0,vuint64m1_t v1,vuint64m1_t v2,size_t vl)1473 void test_vsseg3e64_v_u64m1 (uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
1474   return vsseg3e64_v_u64m1(base, v0, v1, v2, vl);
1475 }
1476 
1477 // CHECK-RV64-LABEL: @test_vsseg4e64_v_u64m1(
1478 // CHECK-RV64-NEXT:  entry:
1479 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
1480 // CHECK-RV64-NEXT:    ret void
1481 //
test_vsseg4e64_v_u64m1(uint64_t * base,vuint64m1_t v0,vuint64m1_t v1,vuint64m1_t v2,vuint64m1_t v3,size_t vl)1482 void test_vsseg4e64_v_u64m1 (uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
1483   return vsseg4e64_v_u64m1(base, v0, v1, v2, v3, vl);
1484 }
1485 
1486 // CHECK-RV64-LABEL: @test_vsseg5e64_v_u64m1(
1487 // CHECK-RV64-NEXT:  entry:
1488 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
1489 // CHECK-RV64-NEXT:    ret void
1490 //
test_vsseg5e64_v_u64m1(uint64_t * base,vuint64m1_t v0,vuint64m1_t v1,vuint64m1_t v2,vuint64m1_t v3,vuint64m1_t v4,size_t vl)1491 void test_vsseg5e64_v_u64m1 (uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
1492   return vsseg5e64_v_u64m1(base, v0, v1, v2, v3, v4, vl);
1493 }
1494 
1495 // CHECK-RV64-LABEL: @test_vsseg6e64_v_u64m1(
1496 // CHECK-RV64-NEXT:  entry:
1497 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
1498 // CHECK-RV64-NEXT:    ret void
1499 //
test_vsseg6e64_v_u64m1(uint64_t * base,vuint64m1_t v0,vuint64m1_t v1,vuint64m1_t v2,vuint64m1_t v3,vuint64m1_t v4,vuint64m1_t v5,size_t vl)1500 void test_vsseg6e64_v_u64m1 (uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
1501   return vsseg6e64_v_u64m1(base, v0, v1, v2, v3, v4, v5, vl);
1502 }
1503 
1504 // CHECK-RV64-LABEL: @test_vsseg7e64_v_u64m1(
1505 // CHECK-RV64-NEXT:  entry:
1506 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
1507 // CHECK-RV64-NEXT:    ret void
1508 //
test_vsseg7e64_v_u64m1(uint64_t * base,vuint64m1_t v0,vuint64m1_t v1,vuint64m1_t v2,vuint64m1_t v3,vuint64m1_t v4,vuint64m1_t v5,vuint64m1_t v6,size_t vl)1509 void test_vsseg7e64_v_u64m1 (uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
1510   return vsseg7e64_v_u64m1(base, v0, v1, v2, v3, v4, v5, v6, vl);
1511 }
1512 
1513 // CHECK-RV64-LABEL: @test_vsseg8e64_v_u64m1(
1514 // CHECK-RV64-NEXT:  entry:
1515 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], <vscale x 1 x i64> [[V7:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
1516 // CHECK-RV64-NEXT:    ret void
1517 //
test_vsseg8e64_v_u64m1(uint64_t * base,vuint64m1_t v0,vuint64m1_t v1,vuint64m1_t v2,vuint64m1_t v3,vuint64m1_t v4,vuint64m1_t v5,vuint64m1_t v6,vuint64m1_t v7,size_t vl)1518 void test_vsseg8e64_v_u64m1 (uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
1519   return vsseg8e64_v_u64m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
1520 }
1521 
1522 // CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m2(
1523 // CHECK-RV64-NEXT:  entry:
1524 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv2i64.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
1525 // CHECK-RV64-NEXT:    ret void
1526 //
test_vsseg2e64_v_u64m2(uint64_t * base,vuint64m2_t v0,vuint64m2_t v1,size_t vl)1527 void test_vsseg2e64_v_u64m2 (uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
1528   return vsseg2e64_v_u64m2(base, v0, v1, vl);
1529 }
1530 
1531 // CHECK-RV64-LABEL: @test_vsseg3e64_v_u64m2(
1532 // CHECK-RV64-NEXT:  entry:
1533 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv2i64.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
1534 // CHECK-RV64-NEXT:    ret void
1535 //
test_vsseg3e64_v_u64m2(uint64_t * base,vuint64m2_t v0,vuint64m2_t v1,vuint64m2_t v2,size_t vl)1536 void test_vsseg3e64_v_u64m2 (uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
1537   return vsseg3e64_v_u64m2(base, v0, v1, v2, vl);
1538 }
1539 
1540 // CHECK-RV64-LABEL: @test_vsseg4e64_v_u64m2(
1541 // CHECK-RV64-NEXT:  entry:
1542 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv2i64.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], <vscale x 2 x i64> [[V3:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
1543 // CHECK-RV64-NEXT:    ret void
1544 //
test_vsseg4e64_v_u64m2(uint64_t * base,vuint64m2_t v0,vuint64m2_t v1,vuint64m2_t v2,vuint64m2_t v3,size_t vl)1545 void test_vsseg4e64_v_u64m2 (uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
1546   return vsseg4e64_v_u64m2(base, v0, v1, v2, v3, vl);
1547 }
1548 
1549 // CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m4(
1550 // CHECK-RV64-NEXT:  entry:
1551 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv4i64.i64(<vscale x 4 x i64> [[V0:%.*]], <vscale x 4 x i64> [[V1:%.*]], i64* [[BASE:%.*]], i64 [[VL:%.*]])
1552 // CHECK-RV64-NEXT:    ret void
1553 //
test_vsseg2e64_v_u64m4(uint64_t * base,vuint64m4_t v0,vuint64m4_t v1,size_t vl)1554 void test_vsseg2e64_v_u64m4 (uint64_t *base, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
1555   return vsseg2e64_v_u64m4(base, v0, v1, vl);
1556 }
1557 
1558 // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf4(
1559 // CHECK-RV64-NEXT:  entry:
1560 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1561 // CHECK-RV64-NEXT:    ret void
1562 //
test_vsseg2e16_v_f16mf4(_Float16 * base,vfloat16mf4_t v0,vfloat16mf4_t v1,size_t vl)1563 void test_vsseg2e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
1564   return vsseg2e16_v_f16mf4(base, v0, v1, vl);
1565 }
1566 
1567 // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf4(
1568 // CHECK-RV64-NEXT:  entry:
1569 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1570 // CHECK-RV64-NEXT:    ret void
1571 //
test_vsseg3e16_v_f16mf4(_Float16 * base,vfloat16mf4_t v0,vfloat16mf4_t v1,vfloat16mf4_t v2,size_t vl)1572 void test_vsseg3e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
1573   return vsseg3e16_v_f16mf4(base, v0, v1, v2, vl);
1574 }
1575 
1576 // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf4(
1577 // CHECK-RV64-NEXT:  entry:
1578 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1579 // CHECK-RV64-NEXT:    ret void
1580 //
test_vsseg4e16_v_f16mf4(_Float16 * base,vfloat16mf4_t v0,vfloat16mf4_t v1,vfloat16mf4_t v2,vfloat16mf4_t v3,size_t vl)1581 void test_vsseg4e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
1582   return vsseg4e16_v_f16mf4(base, v0, v1, v2, v3, vl);
1583 }
1584 
1585 // CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf4(
1586 // CHECK-RV64-NEXT:  entry:
1587 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1588 // CHECK-RV64-NEXT:    ret void
1589 //
test_vsseg5e16_v_f16mf4(_Float16 * base,vfloat16mf4_t v0,vfloat16mf4_t v1,vfloat16mf4_t v2,vfloat16mf4_t v3,vfloat16mf4_t v4,size_t vl)1590 void test_vsseg5e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
1591   return vsseg5e16_v_f16mf4(base, v0, v1, v2, v3, v4, vl);
1592 }
1593 
1594 // CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf4(
1595 // CHECK-RV64-NEXT:  entry:
1596 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1597 // CHECK-RV64-NEXT:    ret void
1598 //
test_vsseg6e16_v_f16mf4(_Float16 * base,vfloat16mf4_t v0,vfloat16mf4_t v1,vfloat16mf4_t v2,vfloat16mf4_t v3,vfloat16mf4_t v4,vfloat16mf4_t v5,size_t vl)1599 void test_vsseg6e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
1600   return vsseg6e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, vl);
1601 }
1602 
1603 // CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf4(
1604 // CHECK-RV64-NEXT:  entry:
1605 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1606 // CHECK-RV64-NEXT:    ret void
1607 //
test_vsseg7e16_v_f16mf4(_Float16 * base,vfloat16mf4_t v0,vfloat16mf4_t v1,vfloat16mf4_t v2,vfloat16mf4_t v3,vfloat16mf4_t v4,vfloat16mf4_t v5,vfloat16mf4_t v6,size_t vl)1608 void test_vsseg7e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
1609   return vsseg7e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, v6, vl);
1610 }
1611 
1612 // CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf4(
1613 // CHECK-RV64-NEXT:  entry:
1614 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1615 // CHECK-RV64-NEXT:    ret void
1616 //
test_vsseg8e16_v_f16mf4(_Float16 * base,vfloat16mf4_t v0,vfloat16mf4_t v1,vfloat16mf4_t v2,vfloat16mf4_t v3,vfloat16mf4_t v4,vfloat16mf4_t v5,vfloat16mf4_t v6,vfloat16mf4_t v7,size_t vl)1617 void test_vsseg8e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
1618   return vsseg8e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
1619 }
1620 
1621 // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2(
1622 // CHECK-RV64-NEXT:  entry:
1623 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1624 // CHECK-RV64-NEXT:    ret void
1625 //
test_vsseg2e16_v_f16mf2(_Float16 * base,vfloat16mf2_t v0,vfloat16mf2_t v1,size_t vl)1626 void test_vsseg2e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
1627   return vsseg2e16_v_f16mf2(base, v0, v1, vl);
1628 }
1629 
1630 // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2(
1631 // CHECK-RV64-NEXT:  entry:
1632 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1633 // CHECK-RV64-NEXT:    ret void
1634 //
test_vsseg3e16_v_f16mf2(_Float16 * base,vfloat16mf2_t v0,vfloat16mf2_t v1,vfloat16mf2_t v2,size_t vl)1635 void test_vsseg3e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
1636   return vsseg3e16_v_f16mf2(base, v0, v1, v2, vl);
1637 }
1638 
1639 // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2(
1640 // CHECK-RV64-NEXT:  entry:
1641 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1642 // CHECK-RV64-NEXT:    ret void
1643 //
test_vsseg4e16_v_f16mf2(_Float16 * base,vfloat16mf2_t v0,vfloat16mf2_t v1,vfloat16mf2_t v2,vfloat16mf2_t v3,size_t vl)1644 void test_vsseg4e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
1645   return vsseg4e16_v_f16mf2(base, v0, v1, v2, v3, vl);
1646 }
1647 
1648 // CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2(
1649 // CHECK-RV64-NEXT:  entry:
1650 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1651 // CHECK-RV64-NEXT:    ret void
1652 //
test_vsseg5e16_v_f16mf2(_Float16 * base,vfloat16mf2_t v0,vfloat16mf2_t v1,vfloat16mf2_t v2,vfloat16mf2_t v3,vfloat16mf2_t v4,size_t vl)1653 void test_vsseg5e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
1654   return vsseg5e16_v_f16mf2(base, v0, v1, v2, v3, v4, vl);
1655 }
1656 
1657 // CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2(
1658 // CHECK-RV64-NEXT:  entry:
1659 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1660 // CHECK-RV64-NEXT:    ret void
1661 //
test_vsseg6e16_v_f16mf2(_Float16 * base,vfloat16mf2_t v0,vfloat16mf2_t v1,vfloat16mf2_t v2,vfloat16mf2_t v3,vfloat16mf2_t v4,vfloat16mf2_t v5,size_t vl)1662 void test_vsseg6e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
1663   return vsseg6e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, vl);
1664 }
1665 
1666 // CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2(
1667 // CHECK-RV64-NEXT:  entry:
1668 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1669 // CHECK-RV64-NEXT:    ret void
1670 //
// 7-field f16mf2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg7.nxv2f16.i64.
void test_vsseg7e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
  return vsseg7e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
1674 
1675 // CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2(
1676 // CHECK-RV64-NEXT:  entry:
1677 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1678 // CHECK-RV64-NEXT:    ret void
1679 //
// 8-field f16mf2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg8.nxv2f16.i64.
void test_vsseg8e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
  return vsseg8e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
1683 
1684 // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1(
1685 // CHECK-RV64-NEXT:  entry:
1686 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1687 // CHECK-RV64-NEXT:    ret void
1688 //
// 2-field f16m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg2.nxv4f16.i64.
void test_vsseg2e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
  return vsseg2e16_v_f16m1(base, v0, v1, vl);
}
1692 
1693 // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1(
1694 // CHECK-RV64-NEXT:  entry:
1695 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1696 // CHECK-RV64-NEXT:    ret void
1697 //
// 3-field f16m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg3.nxv4f16.i64.
void test_vsseg3e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
  return vsseg3e16_v_f16m1(base, v0, v1, v2, vl);
}
1701 
1702 // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1(
1703 // CHECK-RV64-NEXT:  entry:
1704 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1705 // CHECK-RV64-NEXT:    ret void
1706 //
// 4-field f16m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg4.nxv4f16.i64.
void test_vsseg4e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
  return vsseg4e16_v_f16m1(base, v0, v1, v2, v3, vl);
}
1710 
1711 // CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1(
1712 // CHECK-RV64-NEXT:  entry:
1713 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1714 // CHECK-RV64-NEXT:    ret void
1715 //
// 5-field f16m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg5.nxv4f16.i64.
void test_vsseg5e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
  return vsseg5e16_v_f16m1(base, v0, v1, v2, v3, v4, vl);
}
1719 
1720 // CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1(
1721 // CHECK-RV64-NEXT:  entry:
1722 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1723 // CHECK-RV64-NEXT:    ret void
1724 //
// 6-field f16m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg6.nxv4f16.i64.
void test_vsseg6e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
  return vsseg6e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, vl);
}
1728 
1729 // CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1(
1730 // CHECK-RV64-NEXT:  entry:
1731 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1732 // CHECK-RV64-NEXT:    ret void
1733 //
// 7-field f16m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg7.nxv4f16.i64.
void test_vsseg7e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
  return vsseg7e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
1737 
1738 // CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1(
1739 // CHECK-RV64-NEXT:  entry:
1740 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1741 // CHECK-RV64-NEXT:    ret void
1742 //
// 8-field f16m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg8.nxv4f16.i64.
void test_vsseg8e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
  return vsseg8e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
1746 
1747 // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2(
1748 // CHECK-RV64-NEXT:  entry:
1749 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1750 // CHECK-RV64-NEXT:    ret void
1751 //
// 2-field f16m2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg2.nxv8f16.i64.
void test_vsseg2e16_v_f16m2 (_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
  return vsseg2e16_v_f16m2(base, v0, v1, vl);
}
1755 
1756 // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2(
1757 // CHECK-RV64-NEXT:  entry:
1758 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1759 // CHECK-RV64-NEXT:    ret void
1760 //
// 3-field f16m2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg3.nxv8f16.i64.
void test_vsseg3e16_v_f16m2 (_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
  return vsseg3e16_v_f16m2(base, v0, v1, v2, vl);
}
1764 
1765 // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2(
1766 // CHECK-RV64-NEXT:  entry:
1767 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1768 // CHECK-RV64-NEXT:    ret void
1769 //
// 4-field f16m2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg4.nxv8f16.i64.
void test_vsseg4e16_v_f16m2 (_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
  return vsseg4e16_v_f16m2(base, v0, v1, v2, v3, vl);
}
1773 
1774 // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4(
1775 // CHECK-RV64-NEXT:  entry:
1776 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv16f16.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
1777 // CHECK-RV64-NEXT:    ret void
1778 //
// 2-field f16m4 segment store (largest LMUL with a 2-field tuple here);
// CHECK above pins lowering to @llvm.riscv.vsseg2.nxv16f16.i64.
void test_vsseg2e16_v_f16m4 (_Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
  return vsseg2e16_v_f16m4(base, v0, v1, vl);
}
1782 
1783 // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32mf2(
1784 // CHECK-RV64-NEXT:  entry:
1785 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv1f32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1786 // CHECK-RV64-NEXT:    ret void
1787 //
// 2-field f32mf2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg2.nxv1f32.i64.
void test_vsseg2e32_v_f32mf2 (float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
  return vsseg2e32_v_f32mf2(base, v0, v1, vl);
}
1791 
1792 // CHECK-RV64-LABEL: @test_vsseg3e32_v_f32mf2(
1793 // CHECK-RV64-NEXT:  entry:
1794 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv1f32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1795 // CHECK-RV64-NEXT:    ret void
1796 //
// 3-field f32mf2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg3.nxv1f32.i64.
void test_vsseg3e32_v_f32mf2 (float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
  return vsseg3e32_v_f32mf2(base, v0, v1, v2, vl);
}
1800 
1801 // CHECK-RV64-LABEL: @test_vsseg4e32_v_f32mf2(
1802 // CHECK-RV64-NEXT:  entry:
1803 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv1f32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1804 // CHECK-RV64-NEXT:    ret void
1805 //
// 4-field f32mf2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg4.nxv1f32.i64.
void test_vsseg4e32_v_f32mf2 (float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
  return vsseg4e32_v_f32mf2(base, v0, v1, v2, v3, vl);
}
1809 
1810 // CHECK-RV64-LABEL: @test_vsseg5e32_v_f32mf2(
1811 // CHECK-RV64-NEXT:  entry:
1812 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv1f32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1813 // CHECK-RV64-NEXT:    ret void
1814 //
// 5-field f32mf2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg5.nxv1f32.i64.
void test_vsseg5e32_v_f32mf2 (float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
  return vsseg5e32_v_f32mf2(base, v0, v1, v2, v3, v4, vl);
}
1818 
1819 // CHECK-RV64-LABEL: @test_vsseg6e32_v_f32mf2(
1820 // CHECK-RV64-NEXT:  entry:
1821 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv1f32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1822 // CHECK-RV64-NEXT:    ret void
1823 //
// 6-field f32mf2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg6.nxv1f32.i64.
void test_vsseg6e32_v_f32mf2 (float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
  return vsseg6e32_v_f32mf2(base, v0, v1, v2, v3, v4, v5, vl);
}
1827 
1828 // CHECK-RV64-LABEL: @test_vsseg7e32_v_f32mf2(
1829 // CHECK-RV64-NEXT:  entry:
1830 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv1f32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], <vscale x 1 x float> [[V6:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1831 // CHECK-RV64-NEXT:    ret void
1832 //
// 7-field f32mf2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg7.nxv1f32.i64.
void test_vsseg7e32_v_f32mf2 (float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
  return vsseg7e32_v_f32mf2(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
1836 
1837 // CHECK-RV64-LABEL: @test_vsseg8e32_v_f32mf2(
1838 // CHECK-RV64-NEXT:  entry:
1839 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv1f32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], <vscale x 1 x float> [[V6:%.*]], <vscale x 1 x float> [[V7:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1840 // CHECK-RV64-NEXT:    ret void
1841 //
// 8-field f32mf2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg8.nxv1f32.i64.
void test_vsseg8e32_v_f32mf2 (float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
  return vsseg8e32_v_f32mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
1845 
1846 // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m1(
1847 // CHECK-RV64-NEXT:  entry:
1848 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv2f32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1849 // CHECK-RV64-NEXT:    ret void
1850 //
// 2-field f32m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg2.nxv2f32.i64.
void test_vsseg2e32_v_f32m1 (float *base, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
  return vsseg2e32_v_f32m1(base, v0, v1, vl);
}
1854 
1855 // CHECK-RV64-LABEL: @test_vsseg3e32_v_f32m1(
1856 // CHECK-RV64-NEXT:  entry:
1857 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv2f32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1858 // CHECK-RV64-NEXT:    ret void
1859 //
// 3-field f32m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg3.nxv2f32.i64.
void test_vsseg3e32_v_f32m1 (float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
  return vsseg3e32_v_f32m1(base, v0, v1, v2, vl);
}
1863 
1864 // CHECK-RV64-LABEL: @test_vsseg4e32_v_f32m1(
1865 // CHECK-RV64-NEXT:  entry:
1866 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv2f32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1867 // CHECK-RV64-NEXT:    ret void
1868 //
// 4-field f32m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg4.nxv2f32.i64.
void test_vsseg4e32_v_f32m1 (float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
  return vsseg4e32_v_f32m1(base, v0, v1, v2, v3, vl);
}
1872 
1873 // CHECK-RV64-LABEL: @test_vsseg5e32_v_f32m1(
1874 // CHECK-RV64-NEXT:  entry:
1875 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv2f32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1876 // CHECK-RV64-NEXT:    ret void
1877 //
// 5-field f32m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg5.nxv2f32.i64.
void test_vsseg5e32_v_f32m1 (float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
  return vsseg5e32_v_f32m1(base, v0, v1, v2, v3, v4, vl);
}
1881 
1882 // CHECK-RV64-LABEL: @test_vsseg6e32_v_f32m1(
1883 // CHECK-RV64-NEXT:  entry:
1884 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv2f32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1885 // CHECK-RV64-NEXT:    ret void
1886 //
// 6-field f32m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg6.nxv2f32.i64.
void test_vsseg6e32_v_f32m1 (float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
  return vsseg6e32_v_f32m1(base, v0, v1, v2, v3, v4, v5, vl);
}
1890 
1891 // CHECK-RV64-LABEL: @test_vsseg7e32_v_f32m1(
1892 // CHECK-RV64-NEXT:  entry:
1893 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv2f32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], <vscale x 2 x float> [[V6:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1894 // CHECK-RV64-NEXT:    ret void
1895 //
// 7-field f32m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg7.nxv2f32.i64.
void test_vsseg7e32_v_f32m1 (float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
  return vsseg7e32_v_f32m1(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
1899 
1900 // CHECK-RV64-LABEL: @test_vsseg8e32_v_f32m1(
1901 // CHECK-RV64-NEXT:  entry:
1902 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv2f32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], <vscale x 2 x float> [[V6:%.*]], <vscale x 2 x float> [[V7:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1903 // CHECK-RV64-NEXT:    ret void
1904 //
// 8-field f32m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg8.nxv2f32.i64.
void test_vsseg8e32_v_f32m1 (float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
  return vsseg8e32_v_f32m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
1908 
1909 // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m2(
1910 // CHECK-RV64-NEXT:  entry:
1911 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv4f32.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1912 // CHECK-RV64-NEXT:    ret void
1913 //
// 2-field f32m2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg2.nxv4f32.i64.
void test_vsseg2e32_v_f32m2 (float *base, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
  return vsseg2e32_v_f32m2(base, v0, v1, vl);
}
1917 
1918 // CHECK-RV64-LABEL: @test_vsseg3e32_v_f32m2(
1919 // CHECK-RV64-NEXT:  entry:
1920 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv4f32.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], <vscale x 4 x float> [[V2:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1921 // CHECK-RV64-NEXT:    ret void
1922 //
// 3-field f32m2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg3.nxv4f32.i64.
void test_vsseg3e32_v_f32m2 (float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
  return vsseg3e32_v_f32m2(base, v0, v1, v2, vl);
}
1926 
1927 // CHECK-RV64-LABEL: @test_vsseg4e32_v_f32m2(
1928 // CHECK-RV64-NEXT:  entry:
1929 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv4f32.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], <vscale x 4 x float> [[V2:%.*]], <vscale x 4 x float> [[V3:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1930 // CHECK-RV64-NEXT:    ret void
1931 //
// 4-field f32m2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg4.nxv4f32.i64.
void test_vsseg4e32_v_f32m2 (float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
  return vsseg4e32_v_f32m2(base, v0, v1, v2, v3, vl);
}
1935 
1936 // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m4(
1937 // CHECK-RV64-NEXT:  entry:
1938 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv8f32.i64(<vscale x 8 x float> [[V0:%.*]], <vscale x 8 x float> [[V1:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]])
1939 // CHECK-RV64-NEXT:    ret void
1940 //
// 2-field f32m4 segment store; CHECK above pins lowering to @llvm.riscv.vsseg2.nxv8f32.i64.
void test_vsseg2e32_v_f32m4 (float *base, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
  return vsseg2e32_v_f32m4(base, v0, v1, vl);
}
1944 
1945 // CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m1(
1946 // CHECK-RV64-NEXT:  entry:
1947 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv1f64.i64(<vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]])
1948 // CHECK-RV64-NEXT:    ret void
1949 //
// 2-field f64m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg2.nxv1f64.i64.
void test_vsseg2e64_v_f64m1 (double *base, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
  return vsseg2e64_v_f64m1(base, v0, v1, vl);
}
1953 
1954 // CHECK-RV64-LABEL: @test_vsseg3e64_v_f64m1(
1955 // CHECK-RV64-NEXT:  entry:
1956 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv1f64.i64(<vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]])
1957 // CHECK-RV64-NEXT:    ret void
1958 //
// 3-field f64m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg3.nxv1f64.i64.
void test_vsseg3e64_v_f64m1 (double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
  return vsseg3e64_v_f64m1(base, v0, v1, v2, vl);
}
1962 
1963 // CHECK-RV64-LABEL: @test_vsseg4e64_v_f64m1(
1964 // CHECK-RV64-NEXT:  entry:
1965 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv1f64.i64(<vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]])
1966 // CHECK-RV64-NEXT:    ret void
1967 //
// 4-field f64m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg4.nxv1f64.i64.
void test_vsseg4e64_v_f64m1 (double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
  return vsseg4e64_v_f64m1(base, v0, v1, v2, v3, vl);
}
1971 
1972 // CHECK-RV64-LABEL: @test_vsseg5e64_v_f64m1(
1973 // CHECK-RV64-NEXT:  entry:
1974 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.nxv1f64.i64(<vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], <vscale x 1 x double> [[V4:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]])
1975 // CHECK-RV64-NEXT:    ret void
1976 //
// 5-field f64m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg5.nxv1f64.i64.
void test_vsseg5e64_v_f64m1 (double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
  return vsseg5e64_v_f64m1(base, v0, v1, v2, v3, v4, vl);
}
1980 
1981 // CHECK-RV64-LABEL: @test_vsseg6e64_v_f64m1(
1982 // CHECK-RV64-NEXT:  entry:
1983 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.nxv1f64.i64(<vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], <vscale x 1 x double> [[V4:%.*]], <vscale x 1 x double> [[V5:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]])
1984 // CHECK-RV64-NEXT:    ret void
1985 //
// 6-field f64m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg6.nxv1f64.i64.
void test_vsseg6e64_v_f64m1 (double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
  return vsseg6e64_v_f64m1(base, v0, v1, v2, v3, v4, v5, vl);
}
1989 
1990 // CHECK-RV64-LABEL: @test_vsseg7e64_v_f64m1(
1991 // CHECK-RV64-NEXT:  entry:
1992 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.nxv1f64.i64(<vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], <vscale x 1 x double> [[V4:%.*]], <vscale x 1 x double> [[V5:%.*]], <vscale x 1 x double> [[V6:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]])
1993 // CHECK-RV64-NEXT:    ret void
1994 //
// 7-field f64m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg7.nxv1f64.i64.
void test_vsseg7e64_v_f64m1 (double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
  return vsseg7e64_v_f64m1(base, v0, v1, v2, v3, v4, v5, v6, vl);
}
1998 
1999 // CHECK-RV64-LABEL: @test_vsseg8e64_v_f64m1(
2000 // CHECK-RV64-NEXT:  entry:
2001 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.nxv1f64.i64(<vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], <vscale x 1 x double> [[V4:%.*]], <vscale x 1 x double> [[V5:%.*]], <vscale x 1 x double> [[V6:%.*]], <vscale x 1 x double> [[V7:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]])
2002 // CHECK-RV64-NEXT:    ret void
2003 //
// 8-field f64m1 segment store; CHECK above pins lowering to @llvm.riscv.vsseg8.nxv1f64.i64.
void test_vsseg8e64_v_f64m1 (double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
  return vsseg8e64_v_f64m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
2007 
2008 // CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m2(
2009 // CHECK-RV64-NEXT:  entry:
2010 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv2f64.i64(<vscale x 2 x double> [[V0:%.*]], <vscale x 2 x double> [[V1:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]])
2011 // CHECK-RV64-NEXT:    ret void
2012 //
// 2-field f64m2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg2.nxv2f64.i64.
void test_vsseg2e64_v_f64m2 (double *base, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
  return vsseg2e64_v_f64m2(base, v0, v1, vl);
}
2016 
2017 // CHECK-RV64-LABEL: @test_vsseg3e64_v_f64m2(
2018 // CHECK-RV64-NEXT:  entry:
2019 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.nxv2f64.i64(<vscale x 2 x double> [[V0:%.*]], <vscale x 2 x double> [[V1:%.*]], <vscale x 2 x double> [[V2:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]])
2020 // CHECK-RV64-NEXT:    ret void
2021 //
// 3-field f64m2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg3.nxv2f64.i64.
void test_vsseg3e64_v_f64m2 (double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
  return vsseg3e64_v_f64m2(base, v0, v1, v2, vl);
}
2025 
2026 // CHECK-RV64-LABEL: @test_vsseg4e64_v_f64m2(
2027 // CHECK-RV64-NEXT:  entry:
2028 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.nxv2f64.i64(<vscale x 2 x double> [[V0:%.*]], <vscale x 2 x double> [[V1:%.*]], <vscale x 2 x double> [[V2:%.*]], <vscale x 2 x double> [[V3:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]])
2029 // CHECK-RV64-NEXT:    ret void
2030 //
// 4-field f64m2 segment store; CHECK above pins lowering to @llvm.riscv.vsseg4.nxv2f64.i64.
void test_vsseg4e64_v_f64m2 (double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
  return vsseg4e64_v_f64m2(base, v0, v1, v2, v3, vl);
}
2034 
2035 // CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m4(
2036 // CHECK-RV64-NEXT:  entry:
2037 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.nxv4f64.i64(<vscale x 4 x double> [[V0:%.*]], <vscale x 4 x double> [[V1:%.*]], double* [[BASE:%.*]], i64 [[VL:%.*]])
2038 // CHECK-RV64-NEXT:    ret void
2039 //
// 2-field f64m4 segment store; CHECK above pins lowering to @llvm.riscv.vsseg2.nxv4f64.i64.
void test_vsseg2e64_v_f64m4 (double *base, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
  return vsseg2e64_v_f64m4(base, v0, v1, vl);
}
2043 
2044 // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf8_m(
2045 // CHECK-RV64-NEXT:  entry:
2046 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2047 // CHECK-RV64-NEXT:    ret void
2048 //
// Masked 2-field i8mf8 segment store: mask (vbool64_t) is the leading argument;
// CHECK above pins lowering to @llvm.riscv.vsseg2.mask.nxv1i8.i64 with the mask before vl.
void test_vsseg2e8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
  return vsseg2e8_v_i8mf8_m(mask, base, v0, v1, vl);
}
2052 
2053 // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf8_m(
2054 // CHECK-RV64-NEXT:  entry:
2055 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2056 // CHECK-RV64-NEXT:    ret void
2057 //
// Masked 3-field i8mf8 segment store; CHECK above pins lowering to @llvm.riscv.vsseg3.mask.nxv1i8.i64.
void test_vsseg3e8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
  return vsseg3e8_v_i8mf8_m(mask, base, v0, v1, v2, vl);
}
2061 
2062 // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf8_m(
2063 // CHECK-RV64-NEXT:  entry:
2064 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2065 // CHECK-RV64-NEXT:    ret void
2066 //
// Masked 4-field i8mf8 segment store; CHECK above pins lowering to @llvm.riscv.vsseg4.mask.nxv1i8.i64.
void test_vsseg4e8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
  return vsseg4e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, vl);
}
2070 
2071 // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf8_m(
2072 // CHECK-RV64-NEXT:  entry:
2073 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2074 // CHECK-RV64-NEXT:    ret void
2075 //
// Masked 5-field i8mf8 segment store; CHECK above pins lowering to @llvm.riscv.vsseg5.mask.nxv1i8.i64.
void test_vsseg5e8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
  return vsseg5e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, v4, vl);
}
2079 
2080 // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf8_m(
2081 // CHECK-RV64-NEXT:  entry:
2082 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2083 // CHECK-RV64-NEXT:    ret void
2084 //
test_vsseg6e8_v_i8mf8_m(vbool64_t mask,int8_t * base,vint8mf8_t v0,vint8mf8_t v1,vint8mf8_t v2,vint8mf8_t v3,vint8mf8_t v4,vint8mf8_t v5,size_t vl)2085 void test_vsseg6e8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
2086   return vsseg6e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
2087 }
2088 
2089 // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf8_m(
2090 // CHECK-RV64-NEXT:  entry:
2091 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2092 // CHECK-RV64-NEXT:    ret void
2093 //
test_vsseg7e8_v_i8mf8_m(vbool64_t mask,int8_t * base,vint8mf8_t v0,vint8mf8_t v1,vint8mf8_t v2,vint8mf8_t v3,vint8mf8_t v4,vint8mf8_t v5,vint8mf8_t v6,size_t vl)2094 void test_vsseg7e8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
2095   return vsseg7e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
2096 }
2097 
2098 // CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf8_m(
2099 // CHECK-RV64-NEXT:  entry:
2100 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], <vscale x 1 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2101 // CHECK-RV64-NEXT:    ret void
2102 //
test_vsseg8e8_v_i8mf8_m(vbool64_t mask,int8_t * base,vint8mf8_t v0,vint8mf8_t v1,vint8mf8_t v2,vint8mf8_t v3,vint8mf8_t v4,vint8mf8_t v5,vint8mf8_t v6,vint8mf8_t v7,size_t vl)2103 void test_vsseg8e8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
2104   return vsseg8e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
2105 }
2106 
2107 // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf4_m(
2108 // CHECK-RV64-NEXT:  entry:
2109 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2110 // CHECK-RV64-NEXT:    ret void
2111 //
test_vsseg2e8_v_i8mf4_m(vbool32_t mask,int8_t * base,vint8mf4_t v0,vint8mf4_t v1,size_t vl)2112 void test_vsseg2e8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
2113   return vsseg2e8_v_i8mf4_m(mask, base, v0, v1, vl);
2114 }
2115 
2116 // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf4_m(
2117 // CHECK-RV64-NEXT:  entry:
2118 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2119 // CHECK-RV64-NEXT:    ret void
2120 //
test_vsseg3e8_v_i8mf4_m(vbool32_t mask,int8_t * base,vint8mf4_t v0,vint8mf4_t v1,vint8mf4_t v2,size_t vl)2121 void test_vsseg3e8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
2122   return vsseg3e8_v_i8mf4_m(mask, base, v0, v1, v2, vl);
2123 }
2124 
2125 // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf4_m(
2126 // CHECK-RV64-NEXT:  entry:
2127 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2128 // CHECK-RV64-NEXT:    ret void
2129 //
test_vsseg4e8_v_i8mf4_m(vbool32_t mask,int8_t * base,vint8mf4_t v0,vint8mf4_t v1,vint8mf4_t v2,vint8mf4_t v3,size_t vl)2130 void test_vsseg4e8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
2131   return vsseg4e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, vl);
2132 }
2133 
2134 // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf4_m(
2135 // CHECK-RV64-NEXT:  entry:
2136 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2137 // CHECK-RV64-NEXT:    ret void
2138 //
test_vsseg5e8_v_i8mf4_m(vbool32_t mask,int8_t * base,vint8mf4_t v0,vint8mf4_t v1,vint8mf4_t v2,vint8mf4_t v3,vint8mf4_t v4,size_t vl)2139 void test_vsseg5e8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
2140   return vsseg5e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, v4, vl);
2141 }
2142 
2143 // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf4_m(
2144 // CHECK-RV64-NEXT:  entry:
2145 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2146 // CHECK-RV64-NEXT:    ret void
2147 //
test_vsseg6e8_v_i8mf4_m(vbool32_t mask,int8_t * base,vint8mf4_t v0,vint8mf4_t v1,vint8mf4_t v2,vint8mf4_t v3,vint8mf4_t v4,vint8mf4_t v5,size_t vl)2148 void test_vsseg6e8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
2149   return vsseg6e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
2150 }
2151 
2152 // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf4_m(
2153 // CHECK-RV64-NEXT:  entry:
2154 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2155 // CHECK-RV64-NEXT:    ret void
2156 //
test_vsseg7e8_v_i8mf4_m(vbool32_t mask,int8_t * base,vint8mf4_t v0,vint8mf4_t v1,vint8mf4_t v2,vint8mf4_t v3,vint8mf4_t v4,vint8mf4_t v5,vint8mf4_t v6,size_t vl)2157 void test_vsseg7e8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
2158   return vsseg7e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
2159 }
2160 
2161 // CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf4_m(
2162 // CHECK-RV64-NEXT:  entry:
2163 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], <vscale x 2 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2164 // CHECK-RV64-NEXT:    ret void
2165 //
test_vsseg8e8_v_i8mf4_m(vbool32_t mask,int8_t * base,vint8mf4_t v0,vint8mf4_t v1,vint8mf4_t v2,vint8mf4_t v3,vint8mf4_t v4,vint8mf4_t v5,vint8mf4_t v6,vint8mf4_t v7,size_t vl)2166 void test_vsseg8e8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
2167   return vsseg8e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
2168 }
2169 
2170 // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf2_m(
2171 // CHECK-RV64-NEXT:  entry:
2172 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2173 // CHECK-RV64-NEXT:    ret void
2174 //
test_vsseg2e8_v_i8mf2_m(vbool16_t mask,int8_t * base,vint8mf2_t v0,vint8mf2_t v1,size_t vl)2175 void test_vsseg2e8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
2176   return vsseg2e8_v_i8mf2_m(mask, base, v0, v1, vl);
2177 }
2178 
2179 // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf2_m(
2180 // CHECK-RV64-NEXT:  entry:
2181 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2182 // CHECK-RV64-NEXT:    ret void
2183 //
test_vsseg3e8_v_i8mf2_m(vbool16_t mask,int8_t * base,vint8mf2_t v0,vint8mf2_t v1,vint8mf2_t v2,size_t vl)2184 void test_vsseg3e8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
2185   return vsseg3e8_v_i8mf2_m(mask, base, v0, v1, v2, vl);
2186 }
2187 
2188 // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf2_m(
2189 // CHECK-RV64-NEXT:  entry:
2190 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2191 // CHECK-RV64-NEXT:    ret void
2192 //
test_vsseg4e8_v_i8mf2_m(vbool16_t mask,int8_t * base,vint8mf2_t v0,vint8mf2_t v1,vint8mf2_t v2,vint8mf2_t v3,size_t vl)2193 void test_vsseg4e8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
2194   return vsseg4e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, vl);
2195 }
2196 
2197 // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf2_m(
2198 // CHECK-RV64-NEXT:  entry:
2199 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2200 // CHECK-RV64-NEXT:    ret void
2201 //
test_vsseg5e8_v_i8mf2_m(vbool16_t mask,int8_t * base,vint8mf2_t v0,vint8mf2_t v1,vint8mf2_t v2,vint8mf2_t v3,vint8mf2_t v4,size_t vl)2202 void test_vsseg5e8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
2203   return vsseg5e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, v4, vl);
2204 }
2205 
2206 // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf2_m(
2207 // CHECK-RV64-NEXT:  entry:
2208 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2209 // CHECK-RV64-NEXT:    ret void
2210 //
test_vsseg6e8_v_i8mf2_m(vbool16_t mask,int8_t * base,vint8mf2_t v0,vint8mf2_t v1,vint8mf2_t v2,vint8mf2_t v3,vint8mf2_t v4,vint8mf2_t v5,size_t vl)2211 void test_vsseg6e8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
2212   return vsseg6e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
2213 }
2214 
2215 // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf2_m(
2216 // CHECK-RV64-NEXT:  entry:
2217 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2218 // CHECK-RV64-NEXT:    ret void
2219 //
test_vsseg7e8_v_i8mf2_m(vbool16_t mask,int8_t * base,vint8mf2_t v0,vint8mf2_t v1,vint8mf2_t v2,vint8mf2_t v3,vint8mf2_t v4,vint8mf2_t v5,vint8mf2_t v6,size_t vl)2220 void test_vsseg7e8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
2221   return vsseg7e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
2222 }
2223 
2224 // CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf2_m(
2225 // CHECK-RV64-NEXT:  entry:
2226 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], <vscale x 4 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2227 // CHECK-RV64-NEXT:    ret void
2228 //
test_vsseg8e8_v_i8mf2_m(vbool16_t mask,int8_t * base,vint8mf2_t v0,vint8mf2_t v1,vint8mf2_t v2,vint8mf2_t v3,vint8mf2_t v4,vint8mf2_t v5,vint8mf2_t v6,vint8mf2_t v7,size_t vl)2229 void test_vsseg8e8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
2230   return vsseg8e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
2231 }
2232 
2233 // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m1_m(
2234 // CHECK-RV64-NEXT:  entry:
2235 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2236 // CHECK-RV64-NEXT:    ret void
2237 //
test_vsseg2e8_v_i8m1_m(vbool8_t mask,int8_t * base,vint8m1_t v0,vint8m1_t v1,size_t vl)2238 void test_vsseg2e8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, size_t vl) {
2239   return vsseg2e8_v_i8m1_m(mask, base, v0, v1, vl);
2240 }
2241 
2242 // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8m1_m(
2243 // CHECK-RV64-NEXT:  entry:
2244 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2245 // CHECK-RV64-NEXT:    ret void
2246 //
test_vsseg3e8_v_i8m1_m(vbool8_t mask,int8_t * base,vint8m1_t v0,vint8m1_t v1,vint8m1_t v2,size_t vl)2247 void test_vsseg3e8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
2248   return vsseg3e8_v_i8m1_m(mask, base, v0, v1, v2, vl);
2249 }
2250 
2251 // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8m1_m(
2252 // CHECK-RV64-NEXT:  entry:
2253 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2254 // CHECK-RV64-NEXT:    ret void
2255 //
test_vsseg4e8_v_i8m1_m(vbool8_t mask,int8_t * base,vint8m1_t v0,vint8m1_t v1,vint8m1_t v2,vint8m1_t v3,size_t vl)2256 void test_vsseg4e8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
2257   return vsseg4e8_v_i8m1_m(mask, base, v0, v1, v2, v3, vl);
2258 }
2259 
2260 // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8m1_m(
2261 // CHECK-RV64-NEXT:  entry:
2262 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2263 // CHECK-RV64-NEXT:    ret void
2264 //
test_vsseg5e8_v_i8m1_m(vbool8_t mask,int8_t * base,vint8m1_t v0,vint8m1_t v1,vint8m1_t v2,vint8m1_t v3,vint8m1_t v4,size_t vl)2265 void test_vsseg5e8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
2266   return vsseg5e8_v_i8m1_m(mask, base, v0, v1, v2, v3, v4, vl);
2267 }
2268 
2269 // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8m1_m(
2270 // CHECK-RV64-NEXT:  entry:
2271 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2272 // CHECK-RV64-NEXT:    ret void
2273 //
test_vsseg6e8_v_i8m1_m(vbool8_t mask,int8_t * base,vint8m1_t v0,vint8m1_t v1,vint8m1_t v2,vint8m1_t v3,vint8m1_t v4,vint8m1_t v5,size_t vl)2274 void test_vsseg6e8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
2275   return vsseg6e8_v_i8m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
2276 }
2277 
2278 // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8m1_m(
2279 // CHECK-RV64-NEXT:  entry:
2280 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2281 // CHECK-RV64-NEXT:    ret void
2282 //
test_vsseg7e8_v_i8m1_m(vbool8_t mask,int8_t * base,vint8m1_t v0,vint8m1_t v1,vint8m1_t v2,vint8m1_t v3,vint8m1_t v4,vint8m1_t v5,vint8m1_t v6,size_t vl)2283 void test_vsseg7e8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
2284   return vsseg7e8_v_i8m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
2285 }
2286 
2287 // CHECK-RV64-LABEL: @test_vsseg8e8_v_i8m1_m(
2288 // CHECK-RV64-NEXT:  entry:
2289 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], <vscale x 8 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2290 // CHECK-RV64-NEXT:    ret void
2291 //
test_vsseg8e8_v_i8m1_m(vbool8_t mask,int8_t * base,vint8m1_t v0,vint8m1_t v1,vint8m1_t v2,vint8m1_t v3,vint8m1_t v4,vint8m1_t v5,vint8m1_t v6,vint8m1_t v7,size_t vl)2292 void test_vsseg8e8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
2293   return vsseg8e8_v_i8m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
2294 }
2295 
2296 // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m2_m(
2297 // CHECK-RV64-NEXT:  entry:
2298 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2299 // CHECK-RV64-NEXT:    ret void
2300 //
test_vsseg2e8_v_i8m2_m(vbool4_t mask,int8_t * base,vint8m2_t v0,vint8m2_t v1,size_t vl)2301 void test_vsseg2e8_v_i8m2_m (vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, size_t vl) {
2302   return vsseg2e8_v_i8m2_m(mask, base, v0, v1, vl);
2303 }
2304 
2305 // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8m2_m(
2306 // CHECK-RV64-NEXT:  entry:
2307 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2308 // CHECK-RV64-NEXT:    ret void
2309 //
test_vsseg3e8_v_i8m2_m(vbool4_t mask,int8_t * base,vint8m2_t v0,vint8m2_t v1,vint8m2_t v2,size_t vl)2310 void test_vsseg3e8_v_i8m2_m (vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
2311   return vsseg3e8_v_i8m2_m(mask, base, v0, v1, v2, vl);
2312 }
2313 
2314 // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8m2_m(
2315 // CHECK-RV64-NEXT:  entry:
2316 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], <vscale x 16 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2317 // CHECK-RV64-NEXT:    ret void
2318 //
test_vsseg4e8_v_i8m2_m(vbool4_t mask,int8_t * base,vint8m2_t v0,vint8m2_t v1,vint8m2_t v2,vint8m2_t v3,size_t vl)2319 void test_vsseg4e8_v_i8m2_m (vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
2320   return vsseg4e8_v_i8m2_m(mask, base, v0, v1, v2, v3, vl);
2321 }
2322 
2323 // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m4_m(
2324 // CHECK-RV64-NEXT:  entry:
2325 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[V0:%.*]], <vscale x 32 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2326 // CHECK-RV64-NEXT:    ret void
2327 //
test_vsseg2e8_v_i8m4_m(vbool2_t mask,int8_t * base,vint8m4_t v0,vint8m4_t v1,size_t vl)2328 void test_vsseg2e8_v_i8m4_m (vbool2_t mask, int8_t *base, vint8m4_t v0, vint8m4_t v1, size_t vl) {
2329   return vsseg2e8_v_i8m4_m(mask, base, v0, v1, vl);
2330 }
2331 
2332 // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16mf4_m(
2333 // CHECK-RV64-NEXT:  entry:
2334 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2335 // CHECK-RV64-NEXT:    ret void
2336 //
test_vsseg2e16_v_i16mf4_m(vbool64_t mask,int16_t * base,vint16mf4_t v0,vint16mf4_t v1,size_t vl)2337 void test_vsseg2e16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
2338   return vsseg2e16_v_i16mf4_m(mask, base, v0, v1, vl);
2339 }
2340 
2341 // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16mf4_m(
2342 // CHECK-RV64-NEXT:  entry:
2343 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2344 // CHECK-RV64-NEXT:    ret void
2345 //
test_vsseg3e16_v_i16mf4_m(vbool64_t mask,int16_t * base,vint16mf4_t v0,vint16mf4_t v1,vint16mf4_t v2,size_t vl)2346 void test_vsseg3e16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
2347   return vsseg3e16_v_i16mf4_m(mask, base, v0, v1, v2, vl);
2348 }
2349 
2350 // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16mf4_m(
2351 // CHECK-RV64-NEXT:  entry:
2352 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2353 // CHECK-RV64-NEXT:    ret void
2354 //
test_vsseg4e16_v_i16mf4_m(vbool64_t mask,int16_t * base,vint16mf4_t v0,vint16mf4_t v1,vint16mf4_t v2,vint16mf4_t v3,size_t vl)2355 void test_vsseg4e16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
2356   return vsseg4e16_v_i16mf4_m(mask, base, v0, v1, v2, v3, vl);
2357 }
2358 
2359 // CHECK-RV64-LABEL: @test_vsseg5e16_v_i16mf4_m(
2360 // CHECK-RV64-NEXT:  entry:
2361 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2362 // CHECK-RV64-NEXT:    ret void
2363 //
test_vsseg5e16_v_i16mf4_m(vbool64_t mask,int16_t * base,vint16mf4_t v0,vint16mf4_t v1,vint16mf4_t v2,vint16mf4_t v3,vint16mf4_t v4,size_t vl)2364 void test_vsseg5e16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
2365   return vsseg5e16_v_i16mf4_m(mask, base, v0, v1, v2, v3, v4, vl);
2366 }
2367 
2368 // CHECK-RV64-LABEL: @test_vsseg6e16_v_i16mf4_m(
2369 // CHECK-RV64-NEXT:  entry:
2370 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], <vscale x 1 x i16> [[V5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2371 // CHECK-RV64-NEXT:    ret void
2372 //
test_vsseg6e16_v_i16mf4_m(vbool64_t mask,int16_t * base,vint16mf4_t v0,vint16mf4_t v1,vint16mf4_t v2,vint16mf4_t v3,vint16mf4_t v4,vint16mf4_t v5,size_t vl)2373 void test_vsseg6e16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
2374   return vsseg6e16_v_i16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
2375 }
2376 
2377 // CHECK-RV64-LABEL: @test_vsseg7e16_v_i16mf4_m(
2378 // CHECK-RV64-NEXT:  entry:
2379 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], <vscale x 1 x i16> [[V5:%.*]], <vscale x 1 x i16> [[V6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2380 // CHECK-RV64-NEXT:    ret void
2381 //
test_vsseg7e16_v_i16mf4_m(vbool64_t mask,int16_t * base,vint16mf4_t v0,vint16mf4_t v1,vint16mf4_t v2,vint16mf4_t v3,vint16mf4_t v4,vint16mf4_t v5,vint16mf4_t v6,size_t vl)2382 void test_vsseg7e16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
2383   return vsseg7e16_v_i16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
2384 }
2385 
2386 // CHECK-RV64-LABEL: @test_vsseg8e16_v_i16mf4_m(
2387 // CHECK-RV64-NEXT:  entry:
2388 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], <vscale x 1 x i16> [[V5:%.*]], <vscale x 1 x i16> [[V6:%.*]], <vscale x 1 x i16> [[V7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2389 // CHECK-RV64-NEXT:    ret void
2390 //
test_vsseg8e16_v_i16mf4_m(vbool64_t mask,int16_t * base,vint16mf4_t v0,vint16mf4_t v1,vint16mf4_t v2,vint16mf4_t v3,vint16mf4_t v4,vint16mf4_t v5,vint16mf4_t v6,vint16mf4_t v7,size_t vl)2391 void test_vsseg8e16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
2392   return vsseg8e16_v_i16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
2393 }
2394 
2395 // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16mf2_m(
2396 // CHECK-RV64-NEXT:  entry:
2397 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2398 // CHECK-RV64-NEXT:    ret void
2399 //
test_vsseg2e16_v_i16mf2_m(vbool32_t mask,int16_t * base,vint16mf2_t v0,vint16mf2_t v1,size_t vl)2400 void test_vsseg2e16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
2401   return vsseg2e16_v_i16mf2_m(mask, base, v0, v1, vl);
2402 }
2403 
2404 // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16mf2_m(
2405 // CHECK-RV64-NEXT:  entry:
2406 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2407 // CHECK-RV64-NEXT:    ret void
2408 //
test_vsseg3e16_v_i16mf2_m(vbool32_t mask,int16_t * base,vint16mf2_t v0,vint16mf2_t v1,vint16mf2_t v2,size_t vl)2409 void test_vsseg3e16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
2410   return vsseg3e16_v_i16mf2_m(mask, base, v0, v1, v2, vl);
2411 }
2412 
2413 // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16mf2_m(
2414 // CHECK-RV64-NEXT:  entry:
2415 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2416 // CHECK-RV64-NEXT:    ret void
2417 //
test_vsseg4e16_v_i16mf2_m(vbool32_t mask,int16_t * base,vint16mf2_t v0,vint16mf2_t v1,vint16mf2_t v2,vint16mf2_t v3,size_t vl)2418 void test_vsseg4e16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
2419   return vsseg4e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, vl);
2420 }
2421 
2422 // CHECK-RV64-LABEL: @test_vsseg5e16_v_i16mf2_m(
2423 // CHECK-RV64-NEXT:  entry:
2424 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2425 // CHECK-RV64-NEXT:    ret void
2426 //
test_vsseg5e16_v_i16mf2_m(vbool32_t mask,int16_t * base,vint16mf2_t v0,vint16mf2_t v1,vint16mf2_t v2,vint16mf2_t v3,vint16mf2_t v4,size_t vl)2427 void test_vsseg5e16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
2428   return vsseg5e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, v4, vl);
2429 }
2430 
2431 // CHECK-RV64-LABEL: @test_vsseg6e16_v_i16mf2_m(
2432 // CHECK-RV64-NEXT:  entry:
2433 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], <vscale x 2 x i16> [[V5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2434 // CHECK-RV64-NEXT:    ret void
2435 //
test_vsseg6e16_v_i16mf2_m(vbool32_t mask,int16_t * base,vint16mf2_t v0,vint16mf2_t v1,vint16mf2_t v2,vint16mf2_t v3,vint16mf2_t v4,vint16mf2_t v5,size_t vl)2436 void test_vsseg6e16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
2437   return vsseg6e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
2438 }
2439 
2440 // CHECK-RV64-LABEL: @test_vsseg7e16_v_i16mf2_m(
2441 // CHECK-RV64-NEXT:  entry:
2442 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], <vscale x 2 x i16> [[V5:%.*]], <vscale x 2 x i16> [[V6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2443 // CHECK-RV64-NEXT:    ret void
2444 //
// Exercises the masked vsseg7e16 intrinsic (seven i16mf2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg7.mask.nxv2i16.
void test_vsseg7e16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
  return vsseg7e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
2448 
2449 // CHECK-RV64-LABEL: @test_vsseg8e16_v_i16mf2_m(
2450 // CHECK-RV64-NEXT:  entry:
2451 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], <vscale x 2 x i16> [[V5:%.*]], <vscale x 2 x i16> [[V6:%.*]], <vscale x 2 x i16> [[V7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2452 // CHECK-RV64-NEXT:    ret void
2453 //
// Exercises the masked vsseg8e16 intrinsic (eight i16mf2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg8.mask.nxv2i16.
void test_vsseg8e16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
  return vsseg8e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
2457 
2458 // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m1_m(
2459 // CHECK-RV64-NEXT:  entry:
2460 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2461 // CHECK-RV64-NEXT:    ret void
2462 //
// Exercises the masked vsseg2e16 intrinsic (two i16m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg2.mask.nxv4i16.
void test_vsseg2e16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, size_t vl) {
  return vsseg2e16_v_i16m1_m(mask, base, v0, v1, vl);
}
2466 
2467 // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16m1_m(
2468 // CHECK-RV64-NEXT:  entry:
2469 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2470 // CHECK-RV64-NEXT:    ret void
2471 //
// Exercises the masked vsseg3e16 intrinsic (three i16m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg3.mask.nxv4i16.
void test_vsseg3e16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
  return vsseg3e16_v_i16m1_m(mask, base, v0, v1, v2, vl);
}
2475 
2476 // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16m1_m(
2477 // CHECK-RV64-NEXT:  entry:
2478 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2479 // CHECK-RV64-NEXT:    ret void
2480 //
// Exercises the masked vsseg4e16 intrinsic (four i16m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg4.mask.nxv4i16.
void test_vsseg4e16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
  return vsseg4e16_v_i16m1_m(mask, base, v0, v1, v2, v3, vl);
}
2484 
2485 // CHECK-RV64-LABEL: @test_vsseg5e16_v_i16m1_m(
2486 // CHECK-RV64-NEXT:  entry:
2487 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2488 // CHECK-RV64-NEXT:    ret void
2489 //
// Exercises the masked vsseg5e16 intrinsic (five i16m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg5.mask.nxv4i16.
void test_vsseg5e16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
  return vsseg5e16_v_i16m1_m(mask, base, v0, v1, v2, v3, v4, vl);
}
2493 
2494 // CHECK-RV64-LABEL: @test_vsseg6e16_v_i16m1_m(
2495 // CHECK-RV64-NEXT:  entry:
2496 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2497 // CHECK-RV64-NEXT:    ret void
2498 //
// Exercises the masked vsseg6e16 intrinsic (six i16m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg6.mask.nxv4i16.
void test_vsseg6e16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
  return vsseg6e16_v_i16m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
2502 
2503 // CHECK-RV64-LABEL: @test_vsseg7e16_v_i16m1_m(
2504 // CHECK-RV64-NEXT:  entry:
2505 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], <vscale x 4 x i16> [[V6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2506 // CHECK-RV64-NEXT:    ret void
2507 //
// Exercises the masked vsseg7e16 intrinsic (seven i16m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg7.mask.nxv4i16.
void test_vsseg7e16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
  return vsseg7e16_v_i16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
2511 
2512 // CHECK-RV64-LABEL: @test_vsseg8e16_v_i16m1_m(
2513 // CHECK-RV64-NEXT:  entry:
2514 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], <vscale x 4 x i16> [[V6:%.*]], <vscale x 4 x i16> [[V7:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2515 // CHECK-RV64-NEXT:    ret void
2516 //
// Exercises the masked vsseg8e16 intrinsic (eight i16m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg8.mask.nxv4i16.
void test_vsseg8e16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
  return vsseg8e16_v_i16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
2520 
2521 // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m2_m(
2522 // CHECK-RV64-NEXT:  entry:
2523 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2524 // CHECK-RV64-NEXT:    ret void
2525 //
// Exercises the masked vsseg2e16 intrinsic (two i16m2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg2.mask.nxv8i16.
void test_vsseg2e16_v_i16m2_m (vbool8_t mask, int16_t *base, vint16m2_t v0, vint16m2_t v1, size_t vl) {
  return vsseg2e16_v_i16m2_m(mask, base, v0, v1, vl);
}
2529 
2530 // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16m2_m(
2531 // CHECK-RV64-NEXT:  entry:
2532 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], <vscale x 8 x i16> [[V2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2533 // CHECK-RV64-NEXT:    ret void
2534 //
// Exercises the masked vsseg3e16 intrinsic (three i16m2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg3.mask.nxv8i16.
void test_vsseg3e16_v_i16m2_m (vbool8_t mask, int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
  return vsseg3e16_v_i16m2_m(mask, base, v0, v1, v2, vl);
}
2538 
2539 // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16m2_m(
2540 // CHECK-RV64-NEXT:  entry:
2541 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], <vscale x 8 x i16> [[V2:%.*]], <vscale x 8 x i16> [[V3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2542 // CHECK-RV64-NEXT:    ret void
2543 //
// Exercises the masked vsseg4e16 intrinsic (four i16m2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg4.mask.nxv8i16.
void test_vsseg4e16_v_i16m2_m (vbool8_t mask, int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
  return vsseg4e16_v_i16m2_m(mask, base, v0, v1, v2, v3, vl);
}
2547 
2548 // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m4_m(
2549 // CHECK-RV64-NEXT:  entry:
2550 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[V0:%.*]], <vscale x 16 x i16> [[V1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2551 // CHECK-RV64-NEXT:    ret void
2552 //
// Exercises the masked vsseg2e16 intrinsic (two i16m4 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg2.mask.nxv16i16.
void test_vsseg2e16_v_i16m4_m (vbool4_t mask, int16_t *base, vint16m4_t v0, vint16m4_t v1, size_t vl) {
  return vsseg2e16_v_i16m4_m(mask, base, v0, v1, vl);
}
2556 
2557 // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32mf2_m(
2558 // CHECK-RV64-NEXT:  entry:
2559 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2560 // CHECK-RV64-NEXT:    ret void
2561 //
// Exercises the masked vsseg2e32 intrinsic (two i32mf2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg2.mask.nxv1i32.
void test_vsseg2e32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
  return vsseg2e32_v_i32mf2_m(mask, base, v0, v1, vl);
}
2565 
2566 // CHECK-RV64-LABEL: @test_vsseg3e32_v_i32mf2_m(
2567 // CHECK-RV64-NEXT:  entry:
2568 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2569 // CHECK-RV64-NEXT:    ret void
2570 //
// Exercises the masked vsseg3e32 intrinsic (three i32mf2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg3.mask.nxv1i32.
void test_vsseg3e32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
  return vsseg3e32_v_i32mf2_m(mask, base, v0, v1, v2, vl);
}
2574 
2575 // CHECK-RV64-LABEL: @test_vsseg4e32_v_i32mf2_m(
2576 // CHECK-RV64-NEXT:  entry:
2577 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2578 // CHECK-RV64-NEXT:    ret void
2579 //
// Exercises the masked vsseg4e32 intrinsic (four i32mf2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg4.mask.nxv1i32.
void test_vsseg4e32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
  return vsseg4e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, vl);
}
2583 
2584 // CHECK-RV64-LABEL: @test_vsseg5e32_v_i32mf2_m(
2585 // CHECK-RV64-NEXT:  entry:
2586 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2587 // CHECK-RV64-NEXT:    ret void
2588 //
// Exercises the masked vsseg5e32 intrinsic (five i32mf2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg5.mask.nxv1i32.
void test_vsseg5e32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
  return vsseg5e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, v4, vl);
}
2592 
2593 // CHECK-RV64-LABEL: @test_vsseg6e32_v_i32mf2_m(
2594 // CHECK-RV64-NEXT:  entry:
2595 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2596 // CHECK-RV64-NEXT:    ret void
2597 //
// Exercises the masked vsseg6e32 intrinsic (six i32mf2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg6.mask.nxv1i32.
void test_vsseg6e32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
  return vsseg6e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
2601 
2602 // CHECK-RV64-LABEL: @test_vsseg7e32_v_i32mf2_m(
2603 // CHECK-RV64-NEXT:  entry:
2604 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2605 // CHECK-RV64-NEXT:    ret void
2606 //
// Exercises the masked vsseg7e32 intrinsic (seven i32mf2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg7.mask.nxv1i32.
void test_vsseg7e32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
  return vsseg7e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
2610 
2611 // CHECK-RV64-LABEL: @test_vsseg8e32_v_i32mf2_m(
2612 // CHECK-RV64-NEXT:  entry:
2613 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], <vscale x 1 x i32> [[V7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2614 // CHECK-RV64-NEXT:    ret void
2615 //
// Exercises the masked vsseg8e32 intrinsic (eight i32mf2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg8.mask.nxv1i32.
void test_vsseg8e32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
  return vsseg8e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
2619 
2620 // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m1_m(
2621 // CHECK-RV64-NEXT:  entry:
2622 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2623 // CHECK-RV64-NEXT:    ret void
2624 //
// Exercises the masked vsseg2e32 intrinsic (two i32m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg2.mask.nxv2i32.
void test_vsseg2e32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, size_t vl) {
  return vsseg2e32_v_i32m1_m(mask, base, v0, v1, vl);
}
2628 
2629 // CHECK-RV64-LABEL: @test_vsseg3e32_v_i32m1_m(
2630 // CHECK-RV64-NEXT:  entry:
2631 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2632 // CHECK-RV64-NEXT:    ret void
2633 //
// Exercises the masked vsseg3e32 intrinsic (three i32m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg3.mask.nxv2i32.
void test_vsseg3e32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
  return vsseg3e32_v_i32m1_m(mask, base, v0, v1, v2, vl);
}
2637 
2638 // CHECK-RV64-LABEL: @test_vsseg4e32_v_i32m1_m(
2639 // CHECK-RV64-NEXT:  entry:
2640 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2641 // CHECK-RV64-NEXT:    ret void
2642 //
// Exercises the masked vsseg4e32 intrinsic (four i32m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg4.mask.nxv2i32.
void test_vsseg4e32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
  return vsseg4e32_v_i32m1_m(mask, base, v0, v1, v2, v3, vl);
}
2646 
2647 // CHECK-RV64-LABEL: @test_vsseg5e32_v_i32m1_m(
2648 // CHECK-RV64-NEXT:  entry:
2649 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2650 // CHECK-RV64-NEXT:    ret void
2651 //
// Exercises the masked vsseg5e32 intrinsic (five i32m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg5.mask.nxv2i32.
void test_vsseg5e32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
  return vsseg5e32_v_i32m1_m(mask, base, v0, v1, v2, v3, v4, vl);
}
2655 
2656 // CHECK-RV64-LABEL: @test_vsseg6e32_v_i32m1_m(
2657 // CHECK-RV64-NEXT:  entry:
2658 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2659 // CHECK-RV64-NEXT:    ret void
2660 //
// Exercises the masked vsseg6e32 intrinsic (six i32m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg6.mask.nxv2i32.
void test_vsseg6e32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
  return vsseg6e32_v_i32m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
2664 
2665 // CHECK-RV64-LABEL: @test_vsseg7e32_v_i32m1_m(
2666 // CHECK-RV64-NEXT:  entry:
2667 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2668 // CHECK-RV64-NEXT:    ret void
2669 //
// Exercises the masked vsseg7e32 intrinsic (seven i32m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg7.mask.nxv2i32.
void test_vsseg7e32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
  return vsseg7e32_v_i32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
2673 
2674 // CHECK-RV64-LABEL: @test_vsseg8e32_v_i32m1_m(
2675 // CHECK-RV64-NEXT:  entry:
2676 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], <vscale x 2 x i32> [[V7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2677 // CHECK-RV64-NEXT:    ret void
2678 //
// Exercises the masked vsseg8e32 intrinsic (eight i32m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg8.mask.nxv2i32.
void test_vsseg8e32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
  return vsseg8e32_v_i32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
2682 
2683 // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m2_m(
2684 // CHECK-RV64-NEXT:  entry:
2685 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2686 // CHECK-RV64-NEXT:    ret void
2687 //
// Exercises the masked vsseg2e32 intrinsic (two i32m2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg2.mask.nxv4i32.
void test_vsseg2e32_v_i32m2_m (vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, size_t vl) {
  return vsseg2e32_v_i32m2_m(mask, base, v0, v1, vl);
}
2691 
2692 // CHECK-RV64-LABEL: @test_vsseg3e32_v_i32m2_m(
2693 // CHECK-RV64-NEXT:  entry:
2694 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2695 // CHECK-RV64-NEXT:    ret void
2696 //
// Exercises the masked vsseg3e32 intrinsic (three i32m2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg3.mask.nxv4i32.
void test_vsseg3e32_v_i32m2_m (vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
  return vsseg3e32_v_i32m2_m(mask, base, v0, v1, v2, vl);
}
2700 
2701 // CHECK-RV64-LABEL: @test_vsseg4e32_v_i32m2_m(
2702 // CHECK-RV64-NEXT:  entry:
2703 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], <vscale x 4 x i32> [[V3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2704 // CHECK-RV64-NEXT:    ret void
2705 //
// Exercises the masked vsseg4e32 intrinsic (four i32m2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg4.mask.nxv4i32.
void test_vsseg4e32_v_i32m2_m (vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
  return vsseg4e32_v_i32m2_m(mask, base, v0, v1, v2, v3, vl);
}
2709 
2710 // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m4_m(
2711 // CHECK-RV64-NEXT:  entry:
2712 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[V0:%.*]], <vscale x 8 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2713 // CHECK-RV64-NEXT:    ret void
2714 //
// Exercises the masked vsseg2e32 intrinsic (two i32m4 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg2.mask.nxv8i32.
void test_vsseg2e32_v_i32m4_m (vbool8_t mask, int32_t *base, vint32m4_t v0, vint32m4_t v1, size_t vl) {
  return vsseg2e32_v_i32m4_m(mask, base, v0, v1, vl);
}
2718 
2719 // CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m1_m(
2720 // CHECK-RV64-NEXT:  entry:
2721 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2722 // CHECK-RV64-NEXT:    ret void
2723 //
// Exercises the masked vsseg2e64 intrinsic (two i64m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg2.mask.nxv1i64.
void test_vsseg2e64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, size_t vl) {
  return vsseg2e64_v_i64m1_m(mask, base, v0, v1, vl);
}
2727 
2728 // CHECK-RV64-LABEL: @test_vsseg3e64_v_i64m1_m(
2729 // CHECK-RV64-NEXT:  entry:
2730 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2731 // CHECK-RV64-NEXT:    ret void
2732 //
// Exercises the masked vsseg3e64 intrinsic (three i64m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg3.mask.nxv1i64.
void test_vsseg3e64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
  return vsseg3e64_v_i64m1_m(mask, base, v0, v1, v2, vl);
}
2736 
2737 // CHECK-RV64-LABEL: @test_vsseg4e64_v_i64m1_m(
2738 // CHECK-RV64-NEXT:  entry:
2739 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2740 // CHECK-RV64-NEXT:    ret void
2741 //
// Exercises the masked vsseg4e64 intrinsic (four i64m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg4.mask.nxv1i64.
void test_vsseg4e64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
  return vsseg4e64_v_i64m1_m(mask, base, v0, v1, v2, v3, vl);
}
2745 
2746 // CHECK-RV64-LABEL: @test_vsseg5e64_v_i64m1_m(
2747 // CHECK-RV64-NEXT:  entry:
2748 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2749 // CHECK-RV64-NEXT:    ret void
2750 //
// Exercises the masked vsseg5e64 intrinsic (five i64m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg5.mask.nxv1i64.
void test_vsseg5e64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
  return vsseg5e64_v_i64m1_m(mask, base, v0, v1, v2, v3, v4, vl);
}
2754 
2755 // CHECK-RV64-LABEL: @test_vsseg6e64_v_i64m1_m(
2756 // CHECK-RV64-NEXT:  entry:
2757 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2758 // CHECK-RV64-NEXT:    ret void
2759 //
// Exercises the masked vsseg6e64 intrinsic (six i64m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg6.mask.nxv1i64.
void test_vsseg6e64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
  return vsseg6e64_v_i64m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
2763 
2764 // CHECK-RV64-LABEL: @test_vsseg7e64_v_i64m1_m(
2765 // CHECK-RV64-NEXT:  entry:
2766 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2767 // CHECK-RV64-NEXT:    ret void
2768 //
// Exercises the masked vsseg7e64 intrinsic (seven i64m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg7.mask.nxv1i64.
void test_vsseg7e64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
  return vsseg7e64_v_i64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
2772 
2773 // CHECK-RV64-LABEL: @test_vsseg8e64_v_i64m1_m(
2774 // CHECK-RV64-NEXT:  entry:
2775 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], <vscale x 1 x i64> [[V7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2776 // CHECK-RV64-NEXT:    ret void
2777 //
// Exercises the masked vsseg8e64 intrinsic (eight i64m1 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg8.mask.nxv1i64.
void test_vsseg8e64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
  return vsseg8e64_v_i64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
2781 
2782 // CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m2_m(
2783 // CHECK-RV64-NEXT:  entry:
2784 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2785 // CHECK-RV64-NEXT:    ret void
2786 //
// Exercises the masked vsseg2e64 intrinsic (two i64m2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg2.mask.nxv2i64.
void test_vsseg2e64_v_i64m2_m (vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, size_t vl) {
  return vsseg2e64_v_i64m2_m(mask, base, v0, v1, vl);
}
2790 
2791 // CHECK-RV64-LABEL: @test_vsseg3e64_v_i64m2_m(
2792 // CHECK-RV64-NEXT:  entry:
2793 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2794 // CHECK-RV64-NEXT:    ret void
2795 //
// Exercises the masked vsseg3e64 intrinsic (three i64m2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg3.mask.nxv2i64.
void test_vsseg3e64_v_i64m2_m (vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
  return vsseg3e64_v_i64m2_m(mask, base, v0, v1, v2, vl);
}
2799 
2800 // CHECK-RV64-LABEL: @test_vsseg4e64_v_i64m2_m(
2801 // CHECK-RV64-NEXT:  entry:
2802 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv2i64.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], <vscale x 2 x i64> [[V3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2803 // CHECK-RV64-NEXT:    ret void
2804 //
// Exercises the masked vsseg4e64 intrinsic (four i64m2 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg4.mask.nxv2i64.
void test_vsseg4e64_v_i64m2_m (vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
  return vsseg4e64_v_i64m2_m(mask, base, v0, v1, v2, v3, vl);
}
2808 
2809 // CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m4_m(
2810 // CHECK-RV64-NEXT:  entry:
2811 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[V0:%.*]], <vscale x 4 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2812 // CHECK-RV64-NEXT:    ret void
2813 //
// Exercises the masked vsseg2e64 intrinsic (two i64m4 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg2.mask.nxv4i64.
void test_vsseg2e64_v_i64m4_m (vbool16_t mask, int64_t *base, vint64m4_t v0, vint64m4_t v1, size_t vl) {
  return vsseg2e64_v_i64m4_m(mask, base, v0, v1, vl);
}
2817 
2818 // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf8_m(
2819 // CHECK-RV64-NEXT:  entry:
2820 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2821 // CHECK-RV64-NEXT:    ret void
2822 //
// Exercises the masked vsseg2e8 intrinsic (two u8mf8 fields); the CHECK-RV64
// lines above expect a call to @llvm.riscv.vsseg2.mask.nxv1i8 (signless IR).
void test_vsseg2e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
  return vsseg2e8_v_u8mf8_m(mask, base, v0, v1, vl);
}
2826 
2827 // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf8_m(
2828 // CHECK-RV64-NEXT:  entry:
2829 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2830 // CHECK-RV64-NEXT:    ret void
2831 //
// Masked 3-field segment store, unsigned EEW=8/LMUL=1/8; CHECK above asserts @llvm.riscv.vsseg3.mask.nxv1i8.i64.
test_vsseg3e8_v_u8mf8_m(vbool64_t mask,uint8_t * base,vuint8mf8_t v0,vuint8mf8_t v1,vuint8mf8_t v2,size_t vl)2832 void test_vsseg3e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
2833   return vsseg3e8_v_u8mf8_m(mask, base, v0, v1, v2, vl);
2834 }
2835 
2836 // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf8_m(
2837 // CHECK-RV64-NEXT:  entry:
2838 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2839 // CHECK-RV64-NEXT:    ret void
2840 //
// Masked 4-field segment store, unsigned EEW=8/LMUL=1/8; CHECK above asserts @llvm.riscv.vsseg4.mask.nxv1i8.i64.
test_vsseg4e8_v_u8mf8_m(vbool64_t mask,uint8_t * base,vuint8mf8_t v0,vuint8mf8_t v1,vuint8mf8_t v2,vuint8mf8_t v3,size_t vl)2841 void test_vsseg4e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
2842   return vsseg4e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, vl);
2843 }
2844 
2845 // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf8_m(
2846 // CHECK-RV64-NEXT:  entry:
2847 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2848 // CHECK-RV64-NEXT:    ret void
2849 //
// Masked 5-field segment store, unsigned EEW=8/LMUL=1/8; CHECK above asserts @llvm.riscv.vsseg5.mask.nxv1i8.i64.
test_vsseg5e8_v_u8mf8_m(vbool64_t mask,uint8_t * base,vuint8mf8_t v0,vuint8mf8_t v1,vuint8mf8_t v2,vuint8mf8_t v3,vuint8mf8_t v4,size_t vl)2850 void test_vsseg5e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
2851   return vsseg5e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, v4, vl);
2852 }
2853 
2854 // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf8_m(
2855 // CHECK-RV64-NEXT:  entry:
2856 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2857 // CHECK-RV64-NEXT:    ret void
2858 //
// Masked 6-field segment store, unsigned EEW=8/LMUL=1/8; CHECK above asserts @llvm.riscv.vsseg6.mask.nxv1i8.i64.
test_vsseg6e8_v_u8mf8_m(vbool64_t mask,uint8_t * base,vuint8mf8_t v0,vuint8mf8_t v1,vuint8mf8_t v2,vuint8mf8_t v3,vuint8mf8_t v4,vuint8mf8_t v5,size_t vl)2859 void test_vsseg6e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
2860   return vsseg6e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
2861 }
2862 
2863 // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf8_m(
2864 // CHECK-RV64-NEXT:  entry:
2865 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2866 // CHECK-RV64-NEXT:    ret void
2867 //
// Masked 7-field segment store, unsigned EEW=8/LMUL=1/8; CHECK above asserts @llvm.riscv.vsseg7.mask.nxv1i8.i64.
test_vsseg7e8_v_u8mf8_m(vbool64_t mask,uint8_t * base,vuint8mf8_t v0,vuint8mf8_t v1,vuint8mf8_t v2,vuint8mf8_t v3,vuint8mf8_t v4,vuint8mf8_t v5,vuint8mf8_t v6,size_t vl)2868 void test_vsseg7e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
2869   return vsseg7e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
2870 }
2871 
2872 // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf8_m(
2873 // CHECK-RV64-NEXT:  entry:
2874 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv1i8.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], <vscale x 1 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2875 // CHECK-RV64-NEXT:    ret void
2876 //
// Masked 8-field segment store, unsigned EEW=8/LMUL=1/8; CHECK above asserts @llvm.riscv.vsseg8.mask.nxv1i8.i64.
test_vsseg8e8_v_u8mf8_m(vbool64_t mask,uint8_t * base,vuint8mf8_t v0,vuint8mf8_t v1,vuint8mf8_t v2,vuint8mf8_t v3,vuint8mf8_t v4,vuint8mf8_t v5,vuint8mf8_t v6,vuint8mf8_t v7,size_t vl)2877 void test_vsseg8e8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
2878   return vsseg8e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
2879 }
2880 
2881 // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf4_m(
2882 // CHECK-RV64-NEXT:  entry:
2883 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2884 // CHECK-RV64-NEXT:    ret void
2885 //
// Masked 2-field segment store, unsigned EEW=8/LMUL=1/4; CHECK above asserts @llvm.riscv.vsseg2.mask.nxv2i8.i64.
test_vsseg2e8_v_u8mf4_m(vbool32_t mask,uint8_t * base,vuint8mf4_t v0,vuint8mf4_t v1,size_t vl)2886 void test_vsseg2e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
2887   return vsseg2e8_v_u8mf4_m(mask, base, v0, v1, vl);
2888 }
2889 
2890 // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf4_m(
2891 // CHECK-RV64-NEXT:  entry:
2892 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2893 // CHECK-RV64-NEXT:    ret void
2894 //
// Masked 3-field segment store, unsigned EEW=8/LMUL=1/4; CHECK above asserts @llvm.riscv.vsseg3.mask.nxv2i8.i64.
test_vsseg3e8_v_u8mf4_m(vbool32_t mask,uint8_t * base,vuint8mf4_t v0,vuint8mf4_t v1,vuint8mf4_t v2,size_t vl)2895 void test_vsseg3e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
2896   return vsseg3e8_v_u8mf4_m(mask, base, v0, v1, v2, vl);
2897 }
2898 
2899 // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf4_m(
2900 // CHECK-RV64-NEXT:  entry:
2901 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2902 // CHECK-RV64-NEXT:    ret void
2903 //
// Masked 4-field segment store, unsigned EEW=8/LMUL=1/4; CHECK above asserts @llvm.riscv.vsseg4.mask.nxv2i8.i64.
test_vsseg4e8_v_u8mf4_m(vbool32_t mask,uint8_t * base,vuint8mf4_t v0,vuint8mf4_t v1,vuint8mf4_t v2,vuint8mf4_t v3,size_t vl)2904 void test_vsseg4e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
2905   return vsseg4e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, vl);
2906 }
2907 
2908 // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf4_m(
2909 // CHECK-RV64-NEXT:  entry:
2910 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2911 // CHECK-RV64-NEXT:    ret void
2912 //
// Masked 5-field segment store, unsigned EEW=8/LMUL=1/4; CHECK above asserts @llvm.riscv.vsseg5.mask.nxv2i8.i64.
test_vsseg5e8_v_u8mf4_m(vbool32_t mask,uint8_t * base,vuint8mf4_t v0,vuint8mf4_t v1,vuint8mf4_t v2,vuint8mf4_t v3,vuint8mf4_t v4,size_t vl)2913 void test_vsseg5e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
2914   return vsseg5e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, v4, vl);
2915 }
2916 
2917 // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf4_m(
2918 // CHECK-RV64-NEXT:  entry:
2919 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2920 // CHECK-RV64-NEXT:    ret void
2921 //
// Masked 6-field segment store, unsigned EEW=8/LMUL=1/4; CHECK above asserts @llvm.riscv.vsseg6.mask.nxv2i8.i64.
test_vsseg6e8_v_u8mf4_m(vbool32_t mask,uint8_t * base,vuint8mf4_t v0,vuint8mf4_t v1,vuint8mf4_t v2,vuint8mf4_t v3,vuint8mf4_t v4,vuint8mf4_t v5,size_t vl)2922 void test_vsseg6e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
2923   return vsseg6e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
2924 }
2925 
2926 // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf4_m(
2927 // CHECK-RV64-NEXT:  entry:
2928 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2929 // CHECK-RV64-NEXT:    ret void
2930 //
// Masked 7-field segment store, unsigned EEW=8/LMUL=1/4; CHECK above asserts @llvm.riscv.vsseg7.mask.nxv2i8.i64.
test_vsseg7e8_v_u8mf4_m(vbool32_t mask,uint8_t * base,vuint8mf4_t v0,vuint8mf4_t v1,vuint8mf4_t v2,vuint8mf4_t v3,vuint8mf4_t v4,vuint8mf4_t v5,vuint8mf4_t v6,size_t vl)2931 void test_vsseg7e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
2932   return vsseg7e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
2933 }
2934 
2935 // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf4_m(
2936 // CHECK-RV64-NEXT:  entry:
2937 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv2i8.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], <vscale x 2 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2938 // CHECK-RV64-NEXT:    ret void
2939 //
// Masked 8-field segment store, unsigned EEW=8/LMUL=1/4; CHECK above asserts @llvm.riscv.vsseg8.mask.nxv2i8.i64.
test_vsseg8e8_v_u8mf4_m(vbool32_t mask,uint8_t * base,vuint8mf4_t v0,vuint8mf4_t v1,vuint8mf4_t v2,vuint8mf4_t v3,vuint8mf4_t v4,vuint8mf4_t v5,vuint8mf4_t v6,vuint8mf4_t v7,size_t vl)2940 void test_vsseg8e8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
2941   return vsseg8e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
2942 }
2943 
2944 // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf2_m(
2945 // CHECK-RV64-NEXT:  entry:
2946 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2947 // CHECK-RV64-NEXT:    ret void
2948 //
// Masked 2-field segment store, unsigned EEW=8/LMUL=1/2; CHECK above asserts @llvm.riscv.vsseg2.mask.nxv4i8.i64.
test_vsseg2e8_v_u8mf2_m(vbool16_t mask,uint8_t * base,vuint8mf2_t v0,vuint8mf2_t v1,size_t vl)2949 void test_vsseg2e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
2950   return vsseg2e8_v_u8mf2_m(mask, base, v0, v1, vl);
2951 }
2952 
2953 // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf2_m(
2954 // CHECK-RV64-NEXT:  entry:
2955 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2956 // CHECK-RV64-NEXT:    ret void
2957 //
// Masked 3-field segment store, unsigned EEW=8/LMUL=1/2; CHECK above asserts @llvm.riscv.vsseg3.mask.nxv4i8.i64.
test_vsseg3e8_v_u8mf2_m(vbool16_t mask,uint8_t * base,vuint8mf2_t v0,vuint8mf2_t v1,vuint8mf2_t v2,size_t vl)2958 void test_vsseg3e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
2959   return vsseg3e8_v_u8mf2_m(mask, base, v0, v1, v2, vl);
2960 }
2961 
2962 // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf2_m(
2963 // CHECK-RV64-NEXT:  entry:
2964 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2965 // CHECK-RV64-NEXT:    ret void
2966 //
// Masked 4-field segment store, unsigned EEW=8/LMUL=1/2; CHECK above asserts @llvm.riscv.vsseg4.mask.nxv4i8.i64.
test_vsseg4e8_v_u8mf2_m(vbool16_t mask,uint8_t * base,vuint8mf2_t v0,vuint8mf2_t v1,vuint8mf2_t v2,vuint8mf2_t v3,size_t vl)2967 void test_vsseg4e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
2968   return vsseg4e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, vl);
2969 }
2970 
2971 // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf2_m(
2972 // CHECK-RV64-NEXT:  entry:
2973 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2974 // CHECK-RV64-NEXT:    ret void
2975 //
// Masked 5-field segment store, unsigned EEW=8/LMUL=1/2; CHECK above asserts @llvm.riscv.vsseg5.mask.nxv4i8.i64.
test_vsseg5e8_v_u8mf2_m(vbool16_t mask,uint8_t * base,vuint8mf2_t v0,vuint8mf2_t v1,vuint8mf2_t v2,vuint8mf2_t v3,vuint8mf2_t v4,size_t vl)2976 void test_vsseg5e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
2977   return vsseg5e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, v4, vl);
2978 }
2979 
2980 // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf2_m(
2981 // CHECK-RV64-NEXT:  entry:
2982 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2983 // CHECK-RV64-NEXT:    ret void
2984 //
// Masked 6-field segment store, unsigned EEW=8/LMUL=1/2; CHECK above asserts @llvm.riscv.vsseg6.mask.nxv4i8.i64.
test_vsseg6e8_v_u8mf2_m(vbool16_t mask,uint8_t * base,vuint8mf2_t v0,vuint8mf2_t v1,vuint8mf2_t v2,vuint8mf2_t v3,vuint8mf2_t v4,vuint8mf2_t v5,size_t vl)2985 void test_vsseg6e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
2986   return vsseg6e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
2987 }
2988 
2989 // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf2_m(
2990 // CHECK-RV64-NEXT:  entry:
2991 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2992 // CHECK-RV64-NEXT:    ret void
2993 //
// Masked 7-field segment store, unsigned EEW=8/LMUL=1/2; CHECK above asserts @llvm.riscv.vsseg7.mask.nxv4i8.i64.
test_vsseg7e8_v_u8mf2_m(vbool16_t mask,uint8_t * base,vuint8mf2_t v0,vuint8mf2_t v1,vuint8mf2_t v2,vuint8mf2_t v3,vuint8mf2_t v4,vuint8mf2_t v5,vuint8mf2_t v6,size_t vl)2994 void test_vsseg7e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
2995   return vsseg7e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
2996 }
2997 
2998 // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf2_m(
2999 // CHECK-RV64-NEXT:  entry:
3000 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv4i8.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], <vscale x 4 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3001 // CHECK-RV64-NEXT:    ret void
3002 //
// Masked 8-field segment store, unsigned EEW=8/LMUL=1/2; CHECK above asserts @llvm.riscv.vsseg8.mask.nxv4i8.i64.
test_vsseg8e8_v_u8mf2_m(vbool16_t mask,uint8_t * base,vuint8mf2_t v0,vuint8mf2_t v1,vuint8mf2_t v2,vuint8mf2_t v3,vuint8mf2_t v4,vuint8mf2_t v5,vuint8mf2_t v6,vuint8mf2_t v7,size_t vl)3003 void test_vsseg8e8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
3004   return vsseg8e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
3005 }
3006 
3007 // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m1_m(
3008 // CHECK-RV64-NEXT:  entry:
3009 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3010 // CHECK-RV64-NEXT:    ret void
3011 //
// Masked 2-field segment store, unsigned EEW=8/LMUL=1; CHECK above asserts @llvm.riscv.vsseg2.mask.nxv8i8.i64.
test_vsseg2e8_v_u8m1_m(vbool8_t mask,uint8_t * base,vuint8m1_t v0,vuint8m1_t v1,size_t vl)3012 void test_vsseg2e8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
3013   return vsseg2e8_v_u8m1_m(mask, base, v0, v1, vl);
3014 }
3015 
3016 // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8m1_m(
3017 // CHECK-RV64-NEXT:  entry:
3018 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3019 // CHECK-RV64-NEXT:    ret void
3020 //
// Masked 3-field segment store, unsigned EEW=8/LMUL=1; CHECK above asserts @llvm.riscv.vsseg3.mask.nxv8i8.i64.
test_vsseg3e8_v_u8m1_m(vbool8_t mask,uint8_t * base,vuint8m1_t v0,vuint8m1_t v1,vuint8m1_t v2,size_t vl)3021 void test_vsseg3e8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
3022   return vsseg3e8_v_u8m1_m(mask, base, v0, v1, v2, vl);
3023 }
3024 
3025 // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8m1_m(
3026 // CHECK-RV64-NEXT:  entry:
3027 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3028 // CHECK-RV64-NEXT:    ret void
3029 //
// Masked 4-field segment store, unsigned EEW=8/LMUL=1; CHECK above asserts @llvm.riscv.vsseg4.mask.nxv8i8.i64.
test_vsseg4e8_v_u8m1_m(vbool8_t mask,uint8_t * base,vuint8m1_t v0,vuint8m1_t v1,vuint8m1_t v2,vuint8m1_t v3,size_t vl)3030 void test_vsseg4e8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
3031   return vsseg4e8_v_u8m1_m(mask, base, v0, v1, v2, v3, vl);
3032 }
3033 
3034 // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8m1_m(
3035 // CHECK-RV64-NEXT:  entry:
3036 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3037 // CHECK-RV64-NEXT:    ret void
3038 //
// Masked 5-field segment store, unsigned EEW=8/LMUL=1; CHECK above asserts @llvm.riscv.vsseg5.mask.nxv8i8.i64.
test_vsseg5e8_v_u8m1_m(vbool8_t mask,uint8_t * base,vuint8m1_t v0,vuint8m1_t v1,vuint8m1_t v2,vuint8m1_t v3,vuint8m1_t v4,size_t vl)3039 void test_vsseg5e8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
3040   return vsseg5e8_v_u8m1_m(mask, base, v0, v1, v2, v3, v4, vl);
3041 }
3042 
3043 // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8m1_m(
3044 // CHECK-RV64-NEXT:  entry:
3045 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3046 // CHECK-RV64-NEXT:    ret void
3047 //
// Masked 6-field segment store, unsigned EEW=8/LMUL=1; CHECK above asserts @llvm.riscv.vsseg6.mask.nxv8i8.i64.
test_vsseg6e8_v_u8m1_m(vbool8_t mask,uint8_t * base,vuint8m1_t v0,vuint8m1_t v1,vuint8m1_t v2,vuint8m1_t v3,vuint8m1_t v4,vuint8m1_t v5,size_t vl)3048 void test_vsseg6e8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
3049   return vsseg6e8_v_u8m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
3050 }
3051 
3052 // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8m1_m(
3053 // CHECK-RV64-NEXT:  entry:
3054 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3055 // CHECK-RV64-NEXT:    ret void
3056 //
// Masked 7-field segment store, unsigned EEW=8/LMUL=1; CHECK above asserts @llvm.riscv.vsseg7.mask.nxv8i8.i64.
test_vsseg7e8_v_u8m1_m(vbool8_t mask,uint8_t * base,vuint8m1_t v0,vuint8m1_t v1,vuint8m1_t v2,vuint8m1_t v3,vuint8m1_t v4,vuint8m1_t v5,vuint8m1_t v6,size_t vl)3057 void test_vsseg7e8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
3058   return vsseg7e8_v_u8m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
3059 }
3060 
3061 // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8m1_m(
3062 // CHECK-RV64-NEXT:  entry:
3063 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], <vscale x 8 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3064 // CHECK-RV64-NEXT:    ret void
3065 //
// Masked 8-field segment store, unsigned EEW=8/LMUL=1; CHECK above asserts @llvm.riscv.vsseg8.mask.nxv8i8.i64.
test_vsseg8e8_v_u8m1_m(vbool8_t mask,uint8_t * base,vuint8m1_t v0,vuint8m1_t v1,vuint8m1_t v2,vuint8m1_t v3,vuint8m1_t v4,vuint8m1_t v5,vuint8m1_t v6,vuint8m1_t v7,size_t vl)3066 void test_vsseg8e8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
3067   return vsseg8e8_v_u8m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
3068 }
3069 
3070 // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m2_m(
3071 // CHECK-RV64-NEXT:  entry:
3072 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3073 // CHECK-RV64-NEXT:    ret void
3074 //
// Masked 2-field segment store, unsigned EEW=8/LMUL=2; CHECK above asserts @llvm.riscv.vsseg2.mask.nxv16i8.i64.
test_vsseg2e8_v_u8m2_m(vbool4_t mask,uint8_t * base,vuint8m2_t v0,vuint8m2_t v1,size_t vl)3075 void test_vsseg2e8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
3076   return vsseg2e8_v_u8m2_m(mask, base, v0, v1, vl);
3077 }
3078 
3079 // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8m2_m(
3080 // CHECK-RV64-NEXT:  entry:
3081 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3082 // CHECK-RV64-NEXT:    ret void
3083 //
// Masked 3-field segment store, unsigned EEW=8/LMUL=2; CHECK above asserts @llvm.riscv.vsseg3.mask.nxv16i8.i64.
test_vsseg3e8_v_u8m2_m(vbool4_t mask,uint8_t * base,vuint8m2_t v0,vuint8m2_t v1,vuint8m2_t v2,size_t vl)3084 void test_vsseg3e8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
3085   return vsseg3e8_v_u8m2_m(mask, base, v0, v1, v2, vl);
3086 }
3087 
3088 // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8m2_m(
3089 // CHECK-RV64-NEXT:  entry:
3090 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], <vscale x 16 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3091 // CHECK-RV64-NEXT:    ret void
3092 //
// Masked 4-field segment store, unsigned EEW=8/LMUL=2; CHECK above asserts @llvm.riscv.vsseg4.mask.nxv16i8.i64.
test_vsseg4e8_v_u8m2_m(vbool4_t mask,uint8_t * base,vuint8m2_t v0,vuint8m2_t v1,vuint8m2_t v2,vuint8m2_t v3,size_t vl)3093 void test_vsseg4e8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
3094   return vsseg4e8_v_u8m2_m(mask, base, v0, v1, v2, v3, vl);
3095 }
3096 
3097 // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m4_m(
3098 // CHECK-RV64-NEXT:  entry:
3099 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[V0:%.*]], <vscale x 32 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3100 // CHECK-RV64-NEXT:    ret void
3101 //
// Masked 2-field segment store, unsigned EEW=8/LMUL=4 (largest LMUL with a seg2 form here); CHECK above asserts @llvm.riscv.vsseg2.mask.nxv32i8.i64.
test_vsseg2e8_v_u8m4_m(vbool2_t mask,uint8_t * base,vuint8m4_t v0,vuint8m4_t v1,size_t vl)3102 void test_vsseg2e8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
3103   return vsseg2e8_v_u8m4_m(mask, base, v0, v1, vl);
3104 }
3105 
3106 // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16mf4_m(
3107 // CHECK-RV64-NEXT:  entry:
3108 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3109 // CHECK-RV64-NEXT:    ret void
3110 //
// Masked 2-field segment store, unsigned EEW=16/LMUL=1/4; CHECK above asserts @llvm.riscv.vsseg2.mask.nxv1i16.i64.
test_vsseg2e16_v_u16mf4_m(vbool64_t mask,uint16_t * base,vuint16mf4_t v0,vuint16mf4_t v1,size_t vl)3111 void test_vsseg2e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
3112   return vsseg2e16_v_u16mf4_m(mask, base, v0, v1, vl);
3113 }
3114 
3115 // CHECK-RV64-LABEL: @test_vsseg3e16_v_u16mf4_m(
3116 // CHECK-RV64-NEXT:  entry:
3117 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3118 // CHECK-RV64-NEXT:    ret void
3119 //
// Masked 3-field segment store, unsigned EEW=16/LMUL=1/4; CHECK above asserts @llvm.riscv.vsseg3.mask.nxv1i16.i64.
test_vsseg3e16_v_u16mf4_m(vbool64_t mask,uint16_t * base,vuint16mf4_t v0,vuint16mf4_t v1,vuint16mf4_t v2,size_t vl)3120 void test_vsseg3e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
3121   return vsseg3e16_v_u16mf4_m(mask, base, v0, v1, v2, vl);
3122 }
3123 
3124 // CHECK-RV64-LABEL: @test_vsseg4e16_v_u16mf4_m(
3125 // CHECK-RV64-NEXT:  entry:
3126 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3127 // CHECK-RV64-NEXT:    ret void
3128 //
// Masked 4-field segment store, unsigned EEW=16/LMUL=1/4; CHECK above asserts @llvm.riscv.vsseg4.mask.nxv1i16.i64.
test_vsseg4e16_v_u16mf4_m(vbool64_t mask,uint16_t * base,vuint16mf4_t v0,vuint16mf4_t v1,vuint16mf4_t v2,vuint16mf4_t v3,size_t vl)3129 void test_vsseg4e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
3130   return vsseg4e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, vl);
3131 }
3132 
3133 // CHECK-RV64-LABEL: @test_vsseg5e16_v_u16mf4_m(
3134 // CHECK-RV64-NEXT:  entry:
3135 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3136 // CHECK-RV64-NEXT:    ret void
3137 //
// Masked 5-field segment store, unsigned EEW=16/LMUL=1/4; CHECK above asserts @llvm.riscv.vsseg5.mask.nxv1i16.i64.
test_vsseg5e16_v_u16mf4_m(vbool64_t mask,uint16_t * base,vuint16mf4_t v0,vuint16mf4_t v1,vuint16mf4_t v2,vuint16mf4_t v3,vuint16mf4_t v4,size_t vl)3138 void test_vsseg5e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
3139   return vsseg5e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, v4, vl);
3140 }
3141 
3142 // CHECK-RV64-LABEL: @test_vsseg6e16_v_u16mf4_m(
3143 // CHECK-RV64-NEXT:  entry:
3144 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], <vscale x 1 x i16> [[V5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3145 // CHECK-RV64-NEXT:    ret void
3146 //
// Masked 6-field segment store, unsigned EEW=16/LMUL=1/4; CHECK above asserts @llvm.riscv.vsseg6.mask.nxv1i16.i64.
test_vsseg6e16_v_u16mf4_m(vbool64_t mask,uint16_t * base,vuint16mf4_t v0,vuint16mf4_t v1,vuint16mf4_t v2,vuint16mf4_t v3,vuint16mf4_t v4,vuint16mf4_t v5,size_t vl)3147 void test_vsseg6e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
3148   return vsseg6e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
3149 }
3150 
3151 // CHECK-RV64-LABEL: @test_vsseg7e16_v_u16mf4_m(
3152 // CHECK-RV64-NEXT:  entry:
3153 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], <vscale x 1 x i16> [[V5:%.*]], <vscale x 1 x i16> [[V6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3154 // CHECK-RV64-NEXT:    ret void
3155 //
// Masked 7-field segment store, unsigned EEW=16/LMUL=1/4; CHECK above asserts @llvm.riscv.vsseg7.mask.nxv1i16.i64.
test_vsseg7e16_v_u16mf4_m(vbool64_t mask,uint16_t * base,vuint16mf4_t v0,vuint16mf4_t v1,vuint16mf4_t v2,vuint16mf4_t v3,vuint16mf4_t v4,vuint16mf4_t v5,vuint16mf4_t v6,size_t vl)3156 void test_vsseg7e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
3157   return vsseg7e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
3158 }
3159 
3160 // CHECK-RV64-LABEL: @test_vsseg8e16_v_u16mf4_m(
3161 // CHECK-RV64-NEXT:  entry:
3162 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv1i16.i64(<vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], <vscale x 1 x i16> [[V5:%.*]], <vscale x 1 x i16> [[V6:%.*]], <vscale x 1 x i16> [[V7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3163 // CHECK-RV64-NEXT:    ret void
3164 //
// Masked 8-field segment store, unsigned EEW=16/LMUL=1/4; CHECK above asserts @llvm.riscv.vsseg8.mask.nxv1i16.i64.
test_vsseg8e16_v_u16mf4_m(vbool64_t mask,uint16_t * base,vuint16mf4_t v0,vuint16mf4_t v1,vuint16mf4_t v2,vuint16mf4_t v3,vuint16mf4_t v4,vuint16mf4_t v5,vuint16mf4_t v6,vuint16mf4_t v7,size_t vl)3165 void test_vsseg8e16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
3166   return vsseg8e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
3167 }
3168 
3169 // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16mf2_m(
3170 // CHECK-RV64-NEXT:  entry:
3171 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3172 // CHECK-RV64-NEXT:    ret void
3173 //
// Masked 2-field segment store, unsigned EEW=16/LMUL=1/2; CHECK above asserts @llvm.riscv.vsseg2.mask.nxv2i16.i64.
test_vsseg2e16_v_u16mf2_m(vbool32_t mask,uint16_t * base,vuint16mf2_t v0,vuint16mf2_t v1,size_t vl)3174 void test_vsseg2e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
3175   return vsseg2e16_v_u16mf2_m(mask, base, v0, v1, vl);
3176 }
3177 
3178 // CHECK-RV64-LABEL: @test_vsseg3e16_v_u16mf2_m(
3179 // CHECK-RV64-NEXT:  entry:
3180 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3181 // CHECK-RV64-NEXT:    ret void
3182 //
// Masked 3-field segment store, unsigned EEW=16/LMUL=1/2; CHECK above asserts @llvm.riscv.vsseg3.mask.nxv2i16.i64.
test_vsseg3e16_v_u16mf2_m(vbool32_t mask,uint16_t * base,vuint16mf2_t v0,vuint16mf2_t v1,vuint16mf2_t v2,size_t vl)3183 void test_vsseg3e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
3184   return vsseg3e16_v_u16mf2_m(mask, base, v0, v1, v2, vl);
3185 }
3186 
3187 // CHECK-RV64-LABEL: @test_vsseg4e16_v_u16mf2_m(
3188 // CHECK-RV64-NEXT:  entry:
3189 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3190 // CHECK-RV64-NEXT:    ret void
3191 //
// Masked 4-field segment store, unsigned EEW=16/LMUL=1/2; CHECK above asserts @llvm.riscv.vsseg4.mask.nxv2i16.i64.
test_vsseg4e16_v_u16mf2_m(vbool32_t mask,uint16_t * base,vuint16mf2_t v0,vuint16mf2_t v1,vuint16mf2_t v2,vuint16mf2_t v3,size_t vl)3192 void test_vsseg4e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
3193   return vsseg4e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, vl);
3194 }
3195 
3196 // CHECK-RV64-LABEL: @test_vsseg5e16_v_u16mf2_m(
3197 // CHECK-RV64-NEXT:  entry:
3198 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3199 // CHECK-RV64-NEXT:    ret void
3200 //
test_vsseg5e16_v_u16mf2_m(vbool32_t mask,uint16_t * base,vuint16mf2_t v0,vuint16mf2_t v1,vuint16mf2_t v2,vuint16mf2_t v3,vuint16mf2_t v4,size_t vl)3201 void test_vsseg5e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
3202   return vsseg5e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, v4, vl);
3203 }
3204 
3205 // CHECK-RV64-LABEL: @test_vsseg6e16_v_u16mf2_m(
3206 // CHECK-RV64-NEXT:  entry:
3207 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], <vscale x 2 x i16> [[V5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3208 // CHECK-RV64-NEXT:    ret void
3209 //
test_vsseg6e16_v_u16mf2_m(vbool32_t mask,uint16_t * base,vuint16mf2_t v0,vuint16mf2_t v1,vuint16mf2_t v2,vuint16mf2_t v3,vuint16mf2_t v4,vuint16mf2_t v5,size_t vl)3210 void test_vsseg6e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
3211   return vsseg6e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
3212 }
3213 
3214 // CHECK-RV64-LABEL: @test_vsseg7e16_v_u16mf2_m(
3215 // CHECK-RV64-NEXT:  entry:
3216 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], <vscale x 2 x i16> [[V5:%.*]], <vscale x 2 x i16> [[V6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3217 // CHECK-RV64-NEXT:    ret void
3218 //
test_vsseg7e16_v_u16mf2_m(vbool32_t mask,uint16_t * base,vuint16mf2_t v0,vuint16mf2_t v1,vuint16mf2_t v2,vuint16mf2_t v3,vuint16mf2_t v4,vuint16mf2_t v5,vuint16mf2_t v6,size_t vl)3219 void test_vsseg7e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
3220   return vsseg7e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
3221 }
3222 
3223 // CHECK-RV64-LABEL: @test_vsseg8e16_v_u16mf2_m(
3224 // CHECK-RV64-NEXT:  entry:
3225 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv2i16.i64(<vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], <vscale x 2 x i16> [[V5:%.*]], <vscale x 2 x i16> [[V6:%.*]], <vscale x 2 x i16> [[V7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3226 // CHECK-RV64-NEXT:    ret void
3227 //
test_vsseg8e16_v_u16mf2_m(vbool32_t mask,uint16_t * base,vuint16mf2_t v0,vuint16mf2_t v1,vuint16mf2_t v2,vuint16mf2_t v3,vuint16mf2_t v4,vuint16mf2_t v5,vuint16mf2_t v6,vuint16mf2_t v7,size_t vl)3228 void test_vsseg8e16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
3229   return vsseg8e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
3230 }
3231 
// Masked segment stores, u16m1 (SEW=16, LMUL=1 -> <vscale x 4 x i16>):
// vssegNe16_v_u16m1_m must lower to @llvm.riscv.vssegN.mask.nxv4i16.i64.
// Autogenerated CHECK lines — regenerate with update_cc_test_checks.py.
// CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg2e16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
  return vsseg2e16_v_u16m1_m(mask, base, v0, v1, vl);
}

// CHECK-RV64-LABEL: @test_vsseg3e16_v_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg3e16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
  return vsseg3e16_v_u16m1_m(mask, base, v0, v1, v2, vl);
}

// CHECK-RV64-LABEL: @test_vsseg4e16_v_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg4e16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
  return vsseg4e16_v_u16m1_m(mask, base, v0, v1, v2, v3, vl);
}

// CHECK-RV64-LABEL: @test_vsseg5e16_v_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg5e16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
  return vsseg5e16_v_u16m1_m(mask, base, v0, v1, v2, v3, v4, vl);
}

// CHECK-RV64-LABEL: @test_vsseg6e16_v_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg6e16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
  return vsseg6e16_v_u16m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
}

// CHECK-RV64-LABEL: @test_vsseg7e16_v_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], <vscale x 4 x i16> [[V6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg7e16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
  return vsseg7e16_v_u16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}

// CHECK-RV64-LABEL: @test_vsseg8e16_v_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv4i16.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], <vscale x 4 x i16> [[V6:%.*]], <vscale x 4 x i16> [[V7:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg8e16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
  return vsseg8e16_v_u16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
3294 
// Masked segment stores, u16m2 (SEW=16, LMUL=2 -> <vscale x 8 x i16>):
// only seg2..seg4 exist at LMUL=2 (NFIELDS*LMUL must not exceed 8 registers).
// Autogenerated CHECK lines — regenerate with update_cc_test_checks.py.
// CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg2e16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
  return vsseg2e16_v_u16m2_m(mask, base, v0, v1, vl);
}

// CHECK-RV64-LABEL: @test_vsseg3e16_v_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], <vscale x 8 x i16> [[V2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg3e16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
  return vsseg3e16_v_u16m2_m(mask, base, v0, v1, v2, vl);
}

// CHECK-RV64-LABEL: @test_vsseg4e16_v_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], <vscale x 8 x i16> [[V2:%.*]], <vscale x 8 x i16> [[V3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg4e16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
  return vsseg4e16_v_u16m2_m(mask, base, v0, v1, v2, v3, vl);
}
3321 
// Masked segment store, u16m4 (SEW=16, LMUL=4 -> <vscale x 16 x i16>):
// only seg2 fits at LMUL=4.  Autogenerated CHECK lines.
// CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[V0:%.*]], <vscale x 16 x i16> [[V1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg2e16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
  return vsseg2e16_v_u16m4_m(mask, base, v0, v1, vl);
}
3330 
// Masked segment stores, u32mf2 (SEW=32, LMUL=1/2 -> <vscale x 1 x i32>):
// vssegNe32_v_u32mf2_m must lower to @llvm.riscv.vssegN.mask.nxv1i32.i64.
// Autogenerated CHECK lines — regenerate with update_cc_test_checks.py.
// CHECK-RV64-LABEL: @test_vsseg2e32_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg2e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
  return vsseg2e32_v_u32mf2_m(mask, base, v0, v1, vl);
}

// CHECK-RV64-LABEL: @test_vsseg3e32_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg3e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
  return vsseg3e32_v_u32mf2_m(mask, base, v0, v1, v2, vl);
}

// CHECK-RV64-LABEL: @test_vsseg4e32_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg4e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
  return vsseg4e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, vl);
}

// CHECK-RV64-LABEL: @test_vsseg5e32_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg5e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
  return vsseg5e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, v4, vl);
}

// CHECK-RV64-LABEL: @test_vsseg6e32_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg6e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
  return vsseg6e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
}

// CHECK-RV64-LABEL: @test_vsseg7e32_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg7e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
  return vsseg7e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}

// CHECK-RV64-LABEL: @test_vsseg8e32_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], <vscale x 1 x i32> [[V7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg8e32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
  return vsseg8e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
3393 
// Masked segment stores, u32m1 (SEW=32, LMUL=1 -> <vscale x 2 x i32>):
// vssegNe32_v_u32m1_m must lower to @llvm.riscv.vssegN.mask.nxv2i32.i64.
// Autogenerated CHECK lines — regenerate with update_cc_test_checks.py.
// CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg2e32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
  return vsseg2e32_v_u32m1_m(mask, base, v0, v1, vl);
}

// CHECK-RV64-LABEL: @test_vsseg3e32_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg3e32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
  return vsseg3e32_v_u32m1_m(mask, base, v0, v1, v2, vl);
}

// CHECK-RV64-LABEL: @test_vsseg4e32_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg4e32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
  return vsseg4e32_v_u32m1_m(mask, base, v0, v1, v2, v3, vl);
}

// CHECK-RV64-LABEL: @test_vsseg5e32_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg5e32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
  return vsseg5e32_v_u32m1_m(mask, base, v0, v1, v2, v3, v4, vl);
}

// CHECK-RV64-LABEL: @test_vsseg6e32_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg6e32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
  return vsseg6e32_v_u32m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
}

// CHECK-RV64-LABEL: @test_vsseg7e32_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg7e32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
  return vsseg7e32_v_u32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}

// CHECK-RV64-LABEL: @test_vsseg8e32_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], <vscale x 2 x i32> [[V7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg8e32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
  return vsseg8e32_v_u32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
3456 
// Masked segment stores, u32m2 (SEW=32, LMUL=2 -> <vscale x 4 x i32>):
// only seg2..seg4 exist at LMUL=2.  Autogenerated CHECK lines.
// CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg2e32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
  return vsseg2e32_v_u32m2_m(mask, base, v0, v1, vl);
}

// CHECK-RV64-LABEL: @test_vsseg3e32_v_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg3e32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
  return vsseg3e32_v_u32m2_m(mask, base, v0, v1, v2, vl);
}

// CHECK-RV64-LABEL: @test_vsseg4e32_v_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], <vscale x 4 x i32> [[V3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg4e32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
  return vsseg4e32_v_u32m2_m(mask, base, v0, v1, v2, v3, vl);
}
3483 
// Masked segment store, u32m4 (SEW=32, LMUL=4 -> <vscale x 8 x i32>):
// only seg2 fits at LMUL=4.  Autogenerated CHECK lines.
// CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[V0:%.*]], <vscale x 8 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg2e32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
  return vsseg2e32_v_u32m4_m(mask, base, v0, v1, vl);
}
3492 
// Masked segment stores, u64m1 (SEW=64, LMUL=1 -> <vscale x 1 x i64>):
// vssegNe64_v_u64m1_m must lower to @llvm.riscv.vssegN.mask.nxv1i64.i64.
// Autogenerated CHECK lines — regenerate with update_cc_test_checks.py.
// CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg2e64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
  return vsseg2e64_v_u64m1_m(mask, base, v0, v1, vl);
}

// CHECK-RV64-LABEL: @test_vsseg3e64_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg3e64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
  return vsseg3e64_v_u64m1_m(mask, base, v0, v1, v2, vl);
}

// CHECK-RV64-LABEL: @test_vsseg4e64_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg4e64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
  return vsseg4e64_v_u64m1_m(mask, base, v0, v1, v2, v3, vl);
}

// CHECK-RV64-LABEL: @test_vsseg5e64_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg5e64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
  return vsseg5e64_v_u64m1_m(mask, base, v0, v1, v2, v3, v4, vl);
}

// CHECK-RV64-LABEL: @test_vsseg6e64_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg6e64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
  return vsseg6e64_v_u64m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
}

// CHECK-RV64-LABEL: @test_vsseg7e64_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg7e64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
  return vsseg7e64_v_u64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}

// CHECK-RV64-LABEL: @test_vsseg8e64_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], <vscale x 1 x i64> [[V7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg8e64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
  return vsseg8e64_v_u64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
3555 
3556 // CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m2_m(
3557 // CHECK-RV64-NEXT:  entry:
3558 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3559 // CHECK-RV64-NEXT:    ret void
3560 //
// Masked 2-field segment store, EEW=64, u64m2; expected to lower to the
// @llvm.riscv.vsseg2.mask.nxv2i64 intrinsic (autogenerated CHECK lines above).
void test_vsseg2e64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
  return vsseg2e64_v_u64m2_m(mask, base, v0, v1, vl);
}
3564 
3565 // CHECK-RV64-LABEL: @test_vsseg3e64_v_u64m2_m(
3566 // CHECK-RV64-NEXT:  entry:
3567 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3568 // CHECK-RV64-NEXT:    ret void
3569 //
// Masked 3-field segment store, EEW=64, u64m2; expected to lower to the
// @llvm.riscv.vsseg3.mask.nxv2i64 intrinsic (autogenerated CHECK lines above).
void test_vsseg3e64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
  return vsseg3e64_v_u64m2_m(mask, base, v0, v1, v2, vl);
}
3573 
3574 // CHECK-RV64-LABEL: @test_vsseg4e64_v_u64m2_m(
3575 // CHECK-RV64-NEXT:  entry:
3576 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv2i64.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], <vscale x 2 x i64> [[V3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3577 // CHECK-RV64-NEXT:    ret void
3578 //
// Masked 4-field segment store, EEW=64, u64m2; expected to lower to the
// @llvm.riscv.vsseg4.mask.nxv2i64 intrinsic (autogenerated CHECK lines above).
void test_vsseg4e64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
  return vsseg4e64_v_u64m2_m(mask, base, v0, v1, v2, v3, vl);
}
3582 
3583 // CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m4_m(
3584 // CHECK-RV64-NEXT:  entry:
3585 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[V0:%.*]], <vscale x 4 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3586 // CHECK-RV64-NEXT:    ret void
3587 //
// Masked 2-field segment store, EEW=64, u64m4; expected to lower to the
// @llvm.riscv.vsseg2.mask.nxv4i64 intrinsic (autogenerated CHECK lines above).
void test_vsseg2e64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
  return vsseg2e64_v_u64m4_m(mask, base, v0, v1, vl);
}
3591 
3592 // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf4_m(
3593 // CHECK-RV64-NEXT:  entry:
3594 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3595 // CHECK-RV64-NEXT:    ret void
3596 //
// Masked 2-field segment store, EEW=16, f16mf4; expected to lower to the
// @llvm.riscv.vsseg2.mask.nxv1f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg2e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
  return vsseg2e16_v_f16mf4_m(mask, base, v0, v1, vl);
}
3600 
3601 // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf4_m(
3602 // CHECK-RV64-NEXT:  entry:
3603 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3604 // CHECK-RV64-NEXT:    ret void
3605 //
// Masked 3-field segment store, EEW=16, f16mf4; expected to lower to the
// @llvm.riscv.vsseg3.mask.nxv1f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg3e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
  return vsseg3e16_v_f16mf4_m(mask, base, v0, v1, v2, vl);
}
3609 
3610 // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf4_m(
3611 // CHECK-RV64-NEXT:  entry:
3612 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3613 // CHECK-RV64-NEXT:    ret void
3614 //
// Masked 4-field segment store, EEW=16, f16mf4; expected to lower to the
// @llvm.riscv.vsseg4.mask.nxv1f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg4e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
  return vsseg4e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, vl);
}
3618 
3619 // CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf4_m(
3620 // CHECK-RV64-NEXT:  entry:
3621 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3622 // CHECK-RV64-NEXT:    ret void
3623 //
// Masked 5-field segment store, EEW=16, f16mf4; expected to lower to the
// @llvm.riscv.vsseg5.mask.nxv1f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg5e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
  return vsseg5e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, vl);
}
3627 
3628 // CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf4_m(
3629 // CHECK-RV64-NEXT:  entry:
3630 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3631 // CHECK-RV64-NEXT:    ret void
3632 //
// Masked 6-field segment store, EEW=16, f16mf4; expected to lower to the
// @llvm.riscv.vsseg6.mask.nxv1f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg6e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
  return vsseg6e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
3636 
3637 // CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf4_m(
3638 // CHECK-RV64-NEXT:  entry:
3639 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3640 // CHECK-RV64-NEXT:    ret void
3641 //
// Masked 7-field segment store, EEW=16, f16mf4; expected to lower to the
// @llvm.riscv.vsseg7.mask.nxv1f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg7e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
  return vsseg7e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
3645 
3646 // CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf4_m(
3647 // CHECK-RV64-NEXT:  entry:
3648 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv1f16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3649 // CHECK-RV64-NEXT:    ret void
3650 //
// Masked 8-field segment store, EEW=16, f16mf4; expected to lower to the
// @llvm.riscv.vsseg8.mask.nxv1f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg8e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
  return vsseg8e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
3654 
3655 // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2_m(
3656 // CHECK-RV64-NEXT:  entry:
3657 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3658 // CHECK-RV64-NEXT:    ret void
3659 //
// Masked 2-field segment store, EEW=16, f16mf2; expected to lower to the
// @llvm.riscv.vsseg2.mask.nxv2f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg2e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
  return vsseg2e16_v_f16mf2_m(mask, base, v0, v1, vl);
}
3663 
3664 // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2_m(
3665 // CHECK-RV64-NEXT:  entry:
3666 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3667 // CHECK-RV64-NEXT:    ret void
3668 //
// Masked 3-field segment store, EEW=16, f16mf2; expected to lower to the
// @llvm.riscv.vsseg3.mask.nxv2f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg3e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
  return vsseg3e16_v_f16mf2_m(mask, base, v0, v1, v2, vl);
}
3672 
3673 // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2_m(
3674 // CHECK-RV64-NEXT:  entry:
3675 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3676 // CHECK-RV64-NEXT:    ret void
3677 //
// Masked 4-field segment store, EEW=16, f16mf2; expected to lower to the
// @llvm.riscv.vsseg4.mask.nxv2f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg4e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
  return vsseg4e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, vl);
}
3681 
3682 // CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2_m(
3683 // CHECK-RV64-NEXT:  entry:
3684 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3685 // CHECK-RV64-NEXT:    ret void
3686 //
// Masked 5-field segment store, EEW=16, f16mf2; expected to lower to the
// @llvm.riscv.vsseg5.mask.nxv2f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg5e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
  return vsseg5e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, vl);
}
3690 
3691 // CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2_m(
3692 // CHECK-RV64-NEXT:  entry:
3693 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3694 // CHECK-RV64-NEXT:    ret void
3695 //
// Masked 6-field segment store, EEW=16, f16mf2; expected to lower to the
// @llvm.riscv.vsseg6.mask.nxv2f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg6e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
  return vsseg6e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
3699 
3700 // CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2_m(
3701 // CHECK-RV64-NEXT:  entry:
3702 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3703 // CHECK-RV64-NEXT:    ret void
3704 //
// Masked 7-field segment store, EEW=16, f16mf2; expected to lower to the
// @llvm.riscv.vsseg7.mask.nxv2f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg7e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
  return vsseg7e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
3708 
3709 // CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2_m(
3710 // CHECK-RV64-NEXT:  entry:
3711 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv2f16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3712 // CHECK-RV64-NEXT:    ret void
3713 //
// Masked 8-field segment store, EEW=16, f16mf2; expected to lower to the
// @llvm.riscv.vsseg8.mask.nxv2f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg8e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
  return vsseg8e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
3717 
3718 // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1_m(
3719 // CHECK-RV64-NEXT:  entry:
3720 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3721 // CHECK-RV64-NEXT:    ret void
3722 //
// Masked 2-field segment store, EEW=16, f16m1; expected to lower to the
// @llvm.riscv.vsseg2.mask.nxv4f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg2e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
  return vsseg2e16_v_f16m1_m(mask, base, v0, v1, vl);
}
3726 
3727 // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1_m(
3728 // CHECK-RV64-NEXT:  entry:
3729 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3730 // CHECK-RV64-NEXT:    ret void
3731 //
// Masked 3-field segment store, EEW=16, f16m1; expected to lower to the
// @llvm.riscv.vsseg3.mask.nxv4f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg3e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
  return vsseg3e16_v_f16m1_m(mask, base, v0, v1, v2, vl);
}
3735 
3736 // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1_m(
3737 // CHECK-RV64-NEXT:  entry:
3738 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3739 // CHECK-RV64-NEXT:    ret void
3740 //
// Masked 4-field segment store, EEW=16, f16m1; expected to lower to the
// @llvm.riscv.vsseg4.mask.nxv4f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg4e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
  return vsseg4e16_v_f16m1_m(mask, base, v0, v1, v2, v3, vl);
}
3744 
3745 // CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1_m(
3746 // CHECK-RV64-NEXT:  entry:
3747 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3748 // CHECK-RV64-NEXT:    ret void
3749 //
// Masked 5-field segment store, EEW=16, f16m1; expected to lower to the
// @llvm.riscv.vsseg5.mask.nxv4f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg5e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
  return vsseg5e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, vl);
}
3753 
3754 // CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1_m(
3755 // CHECK-RV64-NEXT:  entry:
3756 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3757 // CHECK-RV64-NEXT:    ret void
3758 //
// Masked 6-field segment store, EEW=16, f16m1; expected to lower to the
// @llvm.riscv.vsseg6.mask.nxv4f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg6e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
  return vsseg6e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
3762 
3763 // CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1_m(
3764 // CHECK-RV64-NEXT:  entry:
3765 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3766 // CHECK-RV64-NEXT:    ret void
3767 //
// Masked 7-field segment store, EEW=16, f16m1; expected to lower to the
// @llvm.riscv.vsseg7.mask.nxv4f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg7e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
  return vsseg7e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
3771 
3772 // CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1_m(
3773 // CHECK-RV64-NEXT:  entry:
3774 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv4f16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3775 // CHECK-RV64-NEXT:    ret void
3776 //
// Masked 8-field segment store, EEW=16, f16m1; expected to lower to the
// @llvm.riscv.vsseg8.mask.nxv4f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg8e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
  return vsseg8e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
3780 
3781 // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2_m(
3782 // CHECK-RV64-NEXT:  entry:
3783 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3784 // CHECK-RV64-NEXT:    ret void
3785 //
// Masked 2-field segment store, EEW=16, f16m2; expected to lower to the
// @llvm.riscv.vsseg2.mask.nxv8f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg2e16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
  return vsseg2e16_v_f16m2_m(mask, base, v0, v1, vl);
}
3789 
3790 // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2_m(
3791 // CHECK-RV64-NEXT:  entry:
3792 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3793 // CHECK-RV64-NEXT:    ret void
3794 //
// Masked 3-field segment store, EEW=16, f16m2; expected to lower to the
// @llvm.riscv.vsseg3.mask.nxv8f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg3e16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
  return vsseg3e16_v_f16m2_m(mask, base, v0, v1, v2, vl);
}
3798 
3799 // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2_m(
3800 // CHECK-RV64-NEXT:  entry:
3801 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3802 // CHECK-RV64-NEXT:    ret void
3803 //
// Masked 4-field segment store, EEW=16, f16m2; expected to lower to the
// @llvm.riscv.vsseg4.mask.nxv8f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg4e16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
  return vsseg4e16_v_f16m2_m(mask, base, v0, v1, v2, v3, vl);
}
3807 
3808 // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4_m(
3809 // CHECK-RV64-NEXT:  entry:
3810 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3811 // CHECK-RV64-NEXT:    ret void
3812 //
// Masked 2-field segment store, EEW=16, f16m4; expected to lower to the
// @llvm.riscv.vsseg2.mask.nxv16f16 intrinsic (autogenerated CHECK lines above).
void test_vsseg2e16_v_f16m4_m (vbool4_t mask, _Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
  return vsseg2e16_v_f16m4_m(mask, base, v0, v1, vl);
}
3816 
3817 // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32mf2_m(
3818 // CHECK-RV64-NEXT:  entry:
3819 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv1f32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3820 // CHECK-RV64-NEXT:    ret void
3821 //
// Masked 2-field segment store, EEW=32, f32mf2; expected to lower to the
// @llvm.riscv.vsseg2.mask.nxv1f32 intrinsic (autogenerated CHECK lines above).
void test_vsseg2e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
  return vsseg2e32_v_f32mf2_m(mask, base, v0, v1, vl);
}
3825 
3826 // CHECK-RV64-LABEL: @test_vsseg3e32_v_f32mf2_m(
3827 // CHECK-RV64-NEXT:  entry:
3828 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3829 // CHECK-RV64-NEXT:    ret void
3830 //
// Masked 3-field segment store, EEW=32, f32mf2; expected to lower to the
// @llvm.riscv.vsseg3.mask.nxv1f32 intrinsic (autogenerated CHECK lines above).
void test_vsseg3e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
  return vsseg3e32_v_f32mf2_m(mask, base, v0, v1, v2, vl);
}
3834 
3835 // CHECK-RV64-LABEL: @test_vsseg4e32_v_f32mf2_m(
3836 // CHECK-RV64-NEXT:  entry:
3837 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv1f32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3838 // CHECK-RV64-NEXT:    ret void
3839 //
// Masked 4-field segment store, EEW=32, f32mf2; expected to lower to the
// @llvm.riscv.vsseg4.mask.nxv1f32 intrinsic (autogenerated CHECK lines above).
void test_vsseg4e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
  return vsseg4e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, vl);
}
3843 
3844 // CHECK-RV64-LABEL: @test_vsseg5e32_v_f32mf2_m(
3845 // CHECK-RV64-NEXT:  entry:
3846 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv1f32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3847 // CHECK-RV64-NEXT:    ret void
3848 //
// Masked 5-field segment store, EEW=32, f32mf2; expected to lower to the
// @llvm.riscv.vsseg5.mask.nxv1f32 intrinsic (autogenerated CHECK lines above).
void test_vsseg5e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
  return vsseg5e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, v4, vl);
}
3852 
3853 // CHECK-RV64-LABEL: @test_vsseg6e32_v_f32mf2_m(
3854 // CHECK-RV64-NEXT:  entry:
3855 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv1f32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3856 // CHECK-RV64-NEXT:    ret void
3857 //
// Masked 6-field segment store, EEW=32, f32mf2; expected to lower to the
// @llvm.riscv.vsseg6.mask.nxv1f32 intrinsic (autogenerated CHECK lines above).
void test_vsseg6e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
  return vsseg6e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
}
3861 
3862 // CHECK-RV64-LABEL: @test_vsseg7e32_v_f32mf2_m(
3863 // CHECK-RV64-NEXT:  entry:
3864 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv1f32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], <vscale x 1 x float> [[V6:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3865 // CHECK-RV64-NEXT:    ret void
3866 //
// Masked 7-field segment store, EEW=32, f32mf2; expected to lower to the
// @llvm.riscv.vsseg7.mask.nxv1f32 intrinsic (autogenerated CHECK lines above).
void test_vsseg7e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
  return vsseg7e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
}
3870 
3871 // CHECK-RV64-LABEL: @test_vsseg8e32_v_f32mf2_m(
3872 // CHECK-RV64-NEXT:  entry:
3873 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv1f32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], <vscale x 1 x float> [[V6:%.*]], <vscale x 1 x float> [[V7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3874 // CHECK-RV64-NEXT:    ret void
3875 //
// Masked 8-field segment store, EEW=32, f32mf2; expected to lower to the
// @llvm.riscv.vsseg8.mask.nxv1f32 intrinsic (autogenerated CHECK lines above).
void test_vsseg8e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
  return vsseg8e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
3879 
3880 // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m1_m(
3881 // CHECK-RV64-NEXT:  entry:
3882 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv2f32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3883 // CHECK-RV64-NEXT:    ret void
3884 //
// Masked 2-field segment store, EEW=32, f32m1; expected to lower to the
// @llvm.riscv.vsseg2.mask.nxv2f32 intrinsic (autogenerated CHECK lines above).
void test_vsseg2e32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
  return vsseg2e32_v_f32m1_m(mask, base, v0, v1, vl);
}
3888 
3889 // CHECK-RV64-LABEL: @test_vsseg3e32_v_f32m1_m(
3890 // CHECK-RV64-NEXT:  entry:
3891 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv2f32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3892 // CHECK-RV64-NEXT:    ret void
3893 //
// Masked 3-field segment store, EEW=32, f32m1; expected to lower to the
// @llvm.riscv.vsseg3.mask.nxv2f32 intrinsic (autogenerated CHECK lines above).
void test_vsseg3e32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
  return vsseg3e32_v_f32m1_m(mask, base, v0, v1, v2, vl);
}
3897 
3898 // CHECK-RV64-LABEL: @test_vsseg4e32_v_f32m1_m(
3899 // CHECK-RV64-NEXT:  entry:
3900 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv2f32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3901 // CHECK-RV64-NEXT:    ret void
3902 //
// Masked 4-field segment store, EEW=32, f32m1; expected to lower to the
// @llvm.riscv.vsseg4.mask.nxv2f32 intrinsic (autogenerated CHECK lines above).
void test_vsseg4e32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
  return vsseg4e32_v_f32m1_m(mask, base, v0, v1, v2, v3, vl);
}
3906 
3907 // CHECK-RV64-LABEL: @test_vsseg5e32_v_f32m1_m(
3908 // CHECK-RV64-NEXT:  entry:
3909 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv2f32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3910 // CHECK-RV64-NEXT:    ret void
3911 //
// Masked 5-field segment store, EEW=32, f32m1; expected to lower to the
// @llvm.riscv.vsseg5.mask.nxv2f32 intrinsic (autogenerated CHECK lines above).
void test_vsseg5e32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
  return vsseg5e32_v_f32m1_m(mask, base, v0, v1, v2, v3, v4, vl);
}
3915 
// Masked 6-field segment store, 32-bit float elements, LMUL=1
// (lowered to @llvm.riscv.vsseg6.mask.nxv2f32.i64).
3916 // CHECK-RV64-LABEL: @test_vsseg6e32_v_f32m1_m(
3917 // CHECK-RV64-NEXT:  entry:
3918 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv2f32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3919 // CHECK-RV64-NEXT:    ret void
3920 //
test_vsseg6e32_v_f32m1_m(vbool32_t mask,float * base,vfloat32m1_t v0,vfloat32m1_t v1,vfloat32m1_t v2,vfloat32m1_t v3,vfloat32m1_t v4,vfloat32m1_t v5,size_t vl)3921 void test_vsseg6e32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
3922   return vsseg6e32_v_f32m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
3923 }
3924 
// Masked 7-field segment store, 32-bit float elements, LMUL=1
// (lowered to @llvm.riscv.vsseg7.mask.nxv2f32.i64).
3925 // CHECK-RV64-LABEL: @test_vsseg7e32_v_f32m1_m(
3926 // CHECK-RV64-NEXT:  entry:
3927 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv2f32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], <vscale x 2 x float> [[V6:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3928 // CHECK-RV64-NEXT:    ret void
3929 //
test_vsseg7e32_v_f32m1_m(vbool32_t mask,float * base,vfloat32m1_t v0,vfloat32m1_t v1,vfloat32m1_t v2,vfloat32m1_t v3,vfloat32m1_t v4,vfloat32m1_t v5,vfloat32m1_t v6,size_t vl)3930 void test_vsseg7e32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
3931   return vsseg7e32_v_f32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
3932 }
3933 
// Masked 8-field segment store (maximum NFIELDS), 32-bit float elements,
// LMUL=1 (lowered to @llvm.riscv.vsseg8.mask.nxv2f32.i64).
3934 // CHECK-RV64-LABEL: @test_vsseg8e32_v_f32m1_m(
3935 // CHECK-RV64-NEXT:  entry:
3936 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv2f32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], <vscale x 2 x float> [[V6:%.*]], <vscale x 2 x float> [[V7:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3937 // CHECK-RV64-NEXT:    ret void
3938 //
test_vsseg8e32_v_f32m1_m(vbool32_t mask,float * base,vfloat32m1_t v0,vfloat32m1_t v1,vfloat32m1_t v2,vfloat32m1_t v3,vfloat32m1_t v4,vfloat32m1_t v5,vfloat32m1_t v6,vfloat32m1_t v7,size_t vl)3939 void test_vsseg8e32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
3940   return vsseg8e32_v_f32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
3941 }
3942 
// Masked 2-field segment store, 32-bit float elements, LMUL=2
// (nxv4f32 vectors, vbool16_t mask).
3943 // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m2_m(
3944 // CHECK-RV64-NEXT:  entry:
3945 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv4f32.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3946 // CHECK-RV64-NEXT:    ret void
3947 //
test_vsseg2e32_v_f32m2_m(vbool16_t mask,float * base,vfloat32m2_t v0,vfloat32m2_t v1,size_t vl)3948 void test_vsseg2e32_v_f32m2_m (vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
3949   return vsseg2e32_v_f32m2_m(mask, base, v0, v1, vl);
3950 }
3951 
// Masked 3-field segment store, 32-bit float elements, LMUL=2
// (nxv4f32 vectors, vbool16_t mask).
3952 // CHECK-RV64-LABEL: @test_vsseg3e32_v_f32m2_m(
3953 // CHECK-RV64-NEXT:  entry:
3954 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv4f32.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], <vscale x 4 x float> [[V2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3955 // CHECK-RV64-NEXT:    ret void
3956 //
test_vsseg3e32_v_f32m2_m(vbool16_t mask,float * base,vfloat32m2_t v0,vfloat32m2_t v1,vfloat32m2_t v2,size_t vl)3957 void test_vsseg3e32_v_f32m2_m (vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
3958   return vsseg3e32_v_f32m2_m(mask, base, v0, v1, v2, vl);
3959 }
3960 
// Masked 4-field segment store, 32-bit float elements, LMUL=2
// (NFIELDS=4 is the maximum allowed at LMUL=2: NFIELDS*LMUL must be <= 8).
3961 // CHECK-RV64-LABEL: @test_vsseg4e32_v_f32m2_m(
3962 // CHECK-RV64-NEXT:  entry:
3963 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv4f32.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], <vscale x 4 x float> [[V2:%.*]], <vscale x 4 x float> [[V3:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3964 // CHECK-RV64-NEXT:    ret void
3965 //
test_vsseg4e32_v_f32m2_m(vbool16_t mask,float * base,vfloat32m2_t v0,vfloat32m2_t v1,vfloat32m2_t v2,vfloat32m2_t v3,size_t vl)3966 void test_vsseg4e32_v_f32m2_m (vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
3967   return vsseg4e32_v_f32m2_m(mask, base, v0, v1, v2, v3, vl);
3968 }
3969 
// Masked 2-field segment store, 32-bit float elements, LMUL=4
// (nxv8f32 vectors, vbool8_t mask).
3970 // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m4_m(
3971 // CHECK-RV64-NEXT:  entry:
3972 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv8f32.i64(<vscale x 8 x float> [[V0:%.*]], <vscale x 8 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3973 // CHECK-RV64-NEXT:    ret void
3974 //
test_vsseg2e32_v_f32m4_m(vbool8_t mask,float * base,vfloat32m4_t v0,vfloat32m4_t v1,size_t vl)3975 void test_vsseg2e32_v_f32m4_m (vbool8_t mask, float *base, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
3976   return vsseg2e32_v_f32m4_m(mask, base, v0, v1, vl);
3977 }
3978 
// Masked 2-field segment store, 64-bit double elements, LMUL=1
// (nxv1f64 vectors, vbool64_t mask).
3979 // CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m1_m(
3980 // CHECK-RV64-NEXT:  entry:
3981 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3982 // CHECK-RV64-NEXT:    ret void
3983 //
test_vsseg2e64_v_f64m1_m(vbool64_t mask,double * base,vfloat64m1_t v0,vfloat64m1_t v1,size_t vl)3984 void test_vsseg2e64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
3985   return vsseg2e64_v_f64m1_m(mask, base, v0, v1, vl);
3986 }
3987 
// Masked 3-field segment store, 64-bit double elements, LMUL=1
// (lowered to @llvm.riscv.vsseg3.mask.nxv1f64.i64).
3988 // CHECK-RV64-LABEL: @test_vsseg3e64_v_f64m1_m(
3989 // CHECK-RV64-NEXT:  entry:
3990 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv1f64.i64(<vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
3991 // CHECK-RV64-NEXT:    ret void
3992 //
test_vsseg3e64_v_f64m1_m(vbool64_t mask,double * base,vfloat64m1_t v0,vfloat64m1_t v1,vfloat64m1_t v2,size_t vl)3993 void test_vsseg3e64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
3994   return vsseg3e64_v_f64m1_m(mask, base, v0, v1, v2, vl);
3995 }
3996 
// Masked 4-field segment store, 64-bit double elements, LMUL=1
// (lowered to @llvm.riscv.vsseg4.mask.nxv1f64.i64).
3997 // CHECK-RV64-LABEL: @test_vsseg4e64_v_f64m1_m(
3998 // CHECK-RV64-NEXT:  entry:
3999 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv1f64.i64(<vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
4000 // CHECK-RV64-NEXT:    ret void
4001 //
test_vsseg4e64_v_f64m1_m(vbool64_t mask,double * base,vfloat64m1_t v0,vfloat64m1_t v1,vfloat64m1_t v2,vfloat64m1_t v3,size_t vl)4002 void test_vsseg4e64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
4003   return vsseg4e64_v_f64m1_m(mask, base, v0, v1, v2, v3, vl);
4004 }
4005 
// Masked 5-field segment store, 64-bit double elements, LMUL=1
// (lowered to @llvm.riscv.vsseg5.mask.nxv1f64.i64).
4006 // CHECK-RV64-LABEL: @test_vsseg5e64_v_f64m1_m(
4007 // CHECK-RV64-NEXT:  entry:
4008 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg5.mask.nxv1f64.i64(<vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], <vscale x 1 x double> [[V4:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
4009 // CHECK-RV64-NEXT:    ret void
4010 //
test_vsseg5e64_v_f64m1_m(vbool64_t mask,double * base,vfloat64m1_t v0,vfloat64m1_t v1,vfloat64m1_t v2,vfloat64m1_t v3,vfloat64m1_t v4,size_t vl)4011 void test_vsseg5e64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
4012   return vsseg5e64_v_f64m1_m(mask, base, v0, v1, v2, v3, v4, vl);
4013 }
4014 
// Masked 6-field segment store, 64-bit double elements, LMUL=1
// (lowered to @llvm.riscv.vsseg6.mask.nxv1f64.i64).
4015 // CHECK-RV64-LABEL: @test_vsseg6e64_v_f64m1_m(
4016 // CHECK-RV64-NEXT:  entry:
4017 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg6.mask.nxv1f64.i64(<vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], <vscale x 1 x double> [[V4:%.*]], <vscale x 1 x double> [[V5:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
4018 // CHECK-RV64-NEXT:    ret void
4019 //
test_vsseg6e64_v_f64m1_m(vbool64_t mask,double * base,vfloat64m1_t v0,vfloat64m1_t v1,vfloat64m1_t v2,vfloat64m1_t v3,vfloat64m1_t v4,vfloat64m1_t v5,size_t vl)4020 void test_vsseg6e64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
4021   return vsseg6e64_v_f64m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl);
4022 }
4023 
// Masked 7-field segment store, 64-bit double elements, LMUL=1
// (lowered to @llvm.riscv.vsseg7.mask.nxv1f64.i64).
4024 // CHECK-RV64-LABEL: @test_vsseg7e64_v_f64m1_m(
4025 // CHECK-RV64-NEXT:  entry:
4026 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg7.mask.nxv1f64.i64(<vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], <vscale x 1 x double> [[V4:%.*]], <vscale x 1 x double> [[V5:%.*]], <vscale x 1 x double> [[V6:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
4027 // CHECK-RV64-NEXT:    ret void
4028 //
test_vsseg7e64_v_f64m1_m(vbool64_t mask,double * base,vfloat64m1_t v0,vfloat64m1_t v1,vfloat64m1_t v2,vfloat64m1_t v3,vfloat64m1_t v4,vfloat64m1_t v5,vfloat64m1_t v6,size_t vl)4029 void test_vsseg7e64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
4030   return vsseg7e64_v_f64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl);
4031 }
4032 
// Masked 8-field segment store (maximum NFIELDS), 64-bit double elements,
// LMUL=1 (lowered to @llvm.riscv.vsseg8.mask.nxv1f64.i64).
4033 // CHECK-RV64-LABEL: @test_vsseg8e64_v_f64m1_m(
4034 // CHECK-RV64-NEXT:  entry:
4035 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg8.mask.nxv1f64.i64(<vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], <vscale x 1 x double> [[V4:%.*]], <vscale x 1 x double> [[V5:%.*]], <vscale x 1 x double> [[V6:%.*]], <vscale x 1 x double> [[V7:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
4036 // CHECK-RV64-NEXT:    ret void
4037 //
test_vsseg8e64_v_f64m1_m(vbool64_t mask,double * base,vfloat64m1_t v0,vfloat64m1_t v1,vfloat64m1_t v2,vfloat64m1_t v3,vfloat64m1_t v4,vfloat64m1_t v5,vfloat64m1_t v6,vfloat64m1_t v7,size_t vl)4038 void test_vsseg8e64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
4039   return vsseg8e64_v_f64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
4040 }
4041 
// Masked 2-field segment store, 64-bit double elements, LMUL=2
// (nxv2f64 vectors, vbool32_t mask).
4042 // CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m2_m(
4043 // CHECK-RV64-NEXT:  entry:
4044 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[V0:%.*]], <vscale x 2 x double> [[V1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
4045 // CHECK-RV64-NEXT:    ret void
4046 //
test_vsseg2e64_v_f64m2_m(vbool32_t mask,double * base,vfloat64m2_t v0,vfloat64m2_t v1,size_t vl)4047 void test_vsseg2e64_v_f64m2_m (vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
4048   return vsseg2e64_v_f64m2_m(mask, base, v0, v1, vl);
4049 }
4050 
// Masked 3-field segment store, 64-bit double elements, LMUL=2
// (nxv2f64 vectors, vbool32_t mask).
4051 // CHECK-RV64-LABEL: @test_vsseg3e64_v_f64m2_m(
4052 // CHECK-RV64-NEXT:  entry:
4053 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg3.mask.nxv2f64.i64(<vscale x 2 x double> [[V0:%.*]], <vscale x 2 x double> [[V1:%.*]], <vscale x 2 x double> [[V2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
4054 // CHECK-RV64-NEXT:    ret void
4055 //
test_vsseg3e64_v_f64m2_m(vbool32_t mask,double * base,vfloat64m2_t v0,vfloat64m2_t v1,vfloat64m2_t v2,size_t vl)4056 void test_vsseg3e64_v_f64m2_m (vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
4057   return vsseg3e64_v_f64m2_m(mask, base, v0, v1, v2, vl);
4058 }
4059 
// Masked 4-field segment store, 64-bit double elements, LMUL=2
// (NFIELDS=4 is the maximum allowed at LMUL=2: NFIELDS*LMUL must be <= 8).
4060 // CHECK-RV64-LABEL: @test_vsseg4e64_v_f64m2_m(
4061 // CHECK-RV64-NEXT:  entry:
4062 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv2f64.i64(<vscale x 2 x double> [[V0:%.*]], <vscale x 2 x double> [[V1:%.*]], <vscale x 2 x double> [[V2:%.*]], <vscale x 2 x double> [[V3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
4063 // CHECK-RV64-NEXT:    ret void
4064 //
test_vsseg4e64_v_f64m2_m(vbool32_t mask,double * base,vfloat64m2_t v0,vfloat64m2_t v1,vfloat64m2_t v2,vfloat64m2_t v3,size_t vl)4065 void test_vsseg4e64_v_f64m2_m (vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
4066   return vsseg4e64_v_f64m2_m(mask, base, v0, v1, v2, v3, vl);
4067 }
4068 
// Masked 2-field segment store, 64-bit double elements, LMUL=4
// (nxv4f64 vectors, vbool16_t mask).
4069 // CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m4_m(
4070 // CHECK-RV64-NEXT:  entry:
4071 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[V0:%.*]], <vscale x 4 x double> [[V1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
4072 // CHECK-RV64-NEXT:    ret void
4073 //
test_vsseg2e64_v_f64m4_m(vbool16_t mask,double * base,vfloat64m4_t v0,vfloat64m4_t v1,size_t vl)4074 void test_vsseg2e64_v_f64m4_m (vbool16_t mask, double *base, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
4075   return vsseg2e64_v_f64m4_m(mask, base, v0, v1, vl);
4076 }
4077 
4078