1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
4 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
5 
6 #include <riscv_vector.h>
7 
// vget tests, signed 8-bit elements: each function extracts the subvector at
// position 0 of a wider LMUL register group and must lower to a single
// llvm.experimental.vector.extract call (pinned by the CHECK lines below).
// The 'index' parameter is accepted but unused on purpose — the intrinsic is
// invoked with the constant 0.
// CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) {
  return vget_i8m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) {
  return vget_i8m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) {
  return vget_i8m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) {
  return vget_i8m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) {
  return vget_i8m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) {
  return vget_i8m4(src, 0);
}
61 
// vget tests, unsigned 8-bit elements. Same pattern as the signed i8 cases:
// the IR type is still i8 (signedness lives only in the C-level overload),
// and each call must lower to one llvm.experimental.vector.extract at
// subvector offset 0. 'index' is intentionally unused.
// CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) {
  return vget_u8m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) {
  return vget_u8m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) {
  return vget_u8m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) {
  return vget_u8m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) {
  return vget_u8m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) {
  return vget_u8m4(src, 0);
}
115 
// vget tests, signed 16-bit elements: m1 is <vscale x 4 x i16> here, so the
// extract intrinsic names halve the element counts relative to the i8 cases.
// Each call must lower to one llvm.experimental.vector.extract at offset 0;
// 'index' is intentionally unused.
// CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) {
  return vget_i16m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) {
  return vget_i16m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) {
  return vget_i16m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) {
  return vget_i16m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) {
  return vget_i16m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) {
  return vget_i16m4(src, 0);
}
169 
// vget tests, unsigned 16-bit elements. IR types match the signed i16 cases
// (signedness only affects the C overload picked); each call must lower to
// one llvm.experimental.vector.extract at offset 0. 'index' is intentionally
// unused.
// CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) {
  return vget_u16m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) {
  return vget_u16m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) {
  return vget_u16m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) {
  return vget_u16m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) {
  return vget_u16m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) {
  return vget_u16m4(src, 0);
}
223 
// vget tests, signed 32-bit elements: m1 is <vscale x 2 x i32>. Each call
// must lower to one llvm.experimental.vector.extract at subvector offset 0;
// 'index' is intentionally unused.
// CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) {
  return vget_i32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) {
  return vget_i32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) {
  return vget_i32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) {
  return vget_i32m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) {
  return vget_i32m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) {
  return vget_i32m4(src, 0);
}
277 
// vget tests, unsigned 32-bit elements. IR types match the signed i32 cases;
// each call must lower to one llvm.experimental.vector.extract at offset 0.
// 'index' is intentionally unused.
// CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) {
  return vget_u32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) {
  return vget_u32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) {
  return vget_u32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) {
  return vget_u32m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) {
  return vget_u32m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) {
  return vget_u32m4(src, 0);
}
331 
// vget tests, 32-bit float elements (requires +f): the extract intrinsic
// operates on <vscale x N x float> types. Each call must lower to one
// llvm.experimental.vector.extract at subvector offset 0; 'index' is
// intentionally unused.
// CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) {
  return vget_f32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) {
  return vget_f32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) {
  return vget_f32m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) {
  return vget_f32m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) {
  return vget_f32m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.extract.nxv8f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) {
  return vget_f32m4(src, 0);
}
385 
// vget tests, signed 64-bit elements: m1 is <vscale x 1 x i64>, the smallest
// per-m1 element count in this file. Each call must lower to one
// llvm.experimental.vector.extract at offset 0; 'index' is intentionally
// unused.
// CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) {
  return vget_i64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) {
  return vget_i64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) {
  return vget_i64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) {
  return vget_i64m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) {
  return vget_i64m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) {
  return vget_i64m4(src, 0);
}
439 
// vget tests, unsigned 64-bit elements. IR types match the signed i64 cases;
// each call must lower to one llvm.experimental.vector.extract at offset 0.
// 'index' is intentionally unused.
// CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) {
  return vget_u64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) {
  return vget_u64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) {
  return vget_u64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) {
  return vget_u64m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) {
  return vget_u64m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) {
  return vget_u64m4(src, 0);
}
493 
// vget tests, 64-bit double elements (requires +d): the extract intrinsic
// operates on <vscale x N x double> types. Each call must lower to one
// llvm.experimental.vector.extract at subvector offset 0; 'index' is
// intentionally unused.
// CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv2f64(<vscale x 2 x double> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) {
  return vget_f64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) {
  return vget_f64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) {
  return vget_f64m1(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) {
  return vget_f64m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) {
  return vget_f64m2(src, 0);
}

// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.extract.nxv4f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src, size_t index) {
  return vget_f64m4(src, 0);
}
547 
548