// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
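
// Each test below exercises one vle{8,16,32,64} unit-stride load intrinsic
// (signed, unsigned, and floating-point element types across all LMUL values),
// plus the masked _m variants, and checks the generated llvm.riscv.vle and
// llvm.riscv.vle.mask intrinsic calls.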

// CHECK-RV64-LABEL: @test_vle8_v_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
//
vint8mf8_t test_vle8_v_i8mf8(const int8_t *base, size_t vl) {
  return vle8_v_i8mf8(base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
//
vint8mf4_t test_vle8_v_i8mf4(const int8_t *base, size_t vl) {
  return vle8_v_i8mf4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
//
vint8mf2_t test_vle8_v_i8mf2(const int8_t *base, size_t vl) {
  return vle8_v_i8mf2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
//
vint8m1_t test_vle8_v_i8m1(const int8_t *base, size_t vl) {
  return vle8_v_i8m1(base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
vint8m2_t test_vle8_v_i8m2(const int8_t *base, size_t vl) {
  return vle8_v_i8m2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
//
vint8m4_t test_vle8_v_i8m4(const int8_t *base, size_t vl) {
  return vle8_v_i8m4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
//
vint8m8_t test_vle8_v_i8m8(const int8_t *base, size_t vl) {
  return vle8_v_i8m8(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
//
vint16mf4_t test_vle16_v_i16mf4(const int16_t *base, size_t vl) {
  return vle16_v_i16mf4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
//
vint16mf2_t test_vle16_v_i16mf2(const int16_t *base, size_t vl) {
  return vle16_v_i16mf2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
//
vint16m1_t test_vle16_v_i16m1(const int16_t *base, size_t vl) {
  return vle16_v_i16m1(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
vint16m2_t test_vle16_v_i16m2(const int16_t *base, size_t vl) {
  return vle16_v_i16m2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
//
vint16m4_t test_vle16_v_i16m4(const int16_t *base, size_t vl) {
  return vle16_v_i16m4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
//
vint16m8_t test_vle16_v_i16m8(const int16_t *base, size_t vl) {
  return vle16_v_i16m8(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
//
vint32mf2_t test_vle32_v_i32mf2(const int32_t *base, size_t vl) {
  return vle32_v_i32mf2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
//
vint32m1_t test_vle32_v_i32m1(const int32_t *base, size_t vl) {
  return vle32_v_i32m1(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
vint32m2_t test_vle32_v_i32m2(const int32_t *base, size_t vl) {
  return vle32_v_i32m2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
//
vint32m4_t test_vle32_v_i32m4(const int32_t *base, size_t vl) {
  return vle32_v_i32m4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
//
vint32m8_t test_vle32_v_i32m8(const int32_t *base, size_t vl) {
  return vle32_v_i32m8(base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
//
vint64m1_t test_vle64_v_i64m1(const int64_t *base, size_t vl) {
  return vle64_v_i64m1(base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
vint64m2_t test_vle64_v_i64m2(const int64_t *base, size_t vl) {
  return vle64_v_i64m2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
//
vint64m4_t test_vle64_v_i64m4(const int64_t *base, size_t vl) {
  return vle64_v_i64m4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
//
vint64m8_t test_vle64_v_i64m8(const int64_t *base, size_t vl) {
  return vle64_v_i64m8(base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
//
vuint8mf8_t test_vle8_v_u8mf8(const uint8_t *base, size_t vl) {
  return vle8_v_u8mf8(base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
//
vuint8mf4_t test_vle8_v_u8mf4(const uint8_t *base, size_t vl) {
  return vle8_v_u8mf4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
//
vuint8mf2_t test_vle8_v_u8mf2(const uint8_t *base, size_t vl) {
  return vle8_v_u8mf2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
//
vuint8m1_t test_vle8_v_u8m1(const uint8_t *base, size_t vl) {
  return vle8_v_u8m1(base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
vuint8m2_t test_vle8_v_u8m2(const uint8_t *base, size_t vl) {
  return vle8_v_u8m2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
//
vuint8m4_t test_vle8_v_u8m4(const uint8_t *base, size_t vl) {
  return vle8_v_u8m4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
//
vuint8m8_t test_vle8_v_u8m8(const uint8_t *base, size_t vl) {
  return vle8_v_u8m8(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
//
vuint16mf4_t test_vle16_v_u16mf4(const uint16_t *base, size_t vl) {
  return vle16_v_u16mf4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
//
vuint16mf2_t test_vle16_v_u16mf2(const uint16_t *base, size_t vl) {
  return vle16_v_u16mf2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
//
vuint16m1_t test_vle16_v_u16m1(const uint16_t *base, size_t vl) {
  return vle16_v_u16m1(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
vuint16m2_t test_vle16_v_u16m2(const uint16_t *base, size_t vl) {
  return vle16_v_u16m2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
//
vuint16m4_t test_vle16_v_u16m4(const uint16_t *base, size_t vl) {
  return vle16_v_u16m4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
//
vuint16m8_t test_vle16_v_u16m8(const uint16_t *base, size_t vl) {
  return vle16_v_u16m8(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
//
vuint32mf2_t test_vle32_v_u32mf2(const uint32_t *base, size_t vl) {
  return vle32_v_u32mf2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
//
vuint32m1_t test_vle32_v_u32m1(const uint32_t *base, size_t vl) {
  return vle32_v_u32m1(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
vuint32m2_t test_vle32_v_u32m2(const uint32_t *base, size_t vl) {
  return vle32_v_u32m2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
//
vuint32m4_t test_vle32_v_u32m4(const uint32_t *base, size_t vl) {
  return vle32_v_u32m4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
//
vuint32m8_t test_vle32_v_u32m8(const uint32_t *base, size_t vl) {
  return vle32_v_u32m8(base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
//
vuint64m1_t test_vle64_v_u64m1(const uint64_t *base, size_t vl) {
  return vle64_v_u64m1(base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
vuint64m2_t test_vle64_v_u64m2(const uint64_t *base, size_t vl) {
  return vle64_v_u64m2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
//
vuint64m4_t test_vle64_v_u64m4(const uint64_t *base, size_t vl) {
  return vle64_v_u64m4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
//
vuint64m8_t test_vle64_v_u64m8(const uint64_t *base, size_t vl) {
  return vle64_v_u64m8(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vle.nxv1f32.i64(<vscale x 1 x float>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
//
vfloat32mf2_t test_vle32_v_f32mf2(const float *base, size_t vl) {
  return vle32_v_f32mf2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32.i64(<vscale x 2 x float>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
//
vfloat32m1_t test_vle32_v_f32m1(const float *base, size_t vl) {
  return vle32_v_f32m1(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vle.nxv4f32.i64(<vscale x 4 x float>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
//
vfloat32m2_t test_vle32_v_f32m2(const float *base, size_t vl) {
  return vle32_v_f32m2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vle.nxv8f32.i64(<vscale x 8 x float>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
//
vfloat32m4_t test_vle32_v_f32m4(const float *base, size_t vl) {
  return vle32_v_f32m4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
//
vfloat32m8_t test_vle32_v_f32m8(const float *base, size_t vl) {
  return vle32_v_f32m8(base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64.i64(<vscale x 1 x double>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
//
vfloat64m1_t test_vle64_v_f64m1(const double *base, size_t vl) {
  return vle64_v_f64m1(base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64.i64(<vscale x 2 x double>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
//
vfloat64m2_t test_vle64_v_f64m2(const double *base, size_t vl) {
  return vle64_v_f64m2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64.i64(<vscale x 4 x double>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
//
vfloat64m4_t test_vle64_v_f64m4(const double *base, size_t vl) {
  return vle64_v_f64m4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64.i64(<vscale x 8 x double>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
//
vfloat64m8_t test_vle64_v_f64m8(const double *base, size_t vl) {
  return vle64_v_f64m8(base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_i8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
//
vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) {
  return vle8_v_i8mf8_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
//
vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) {
  return vle8_v_i8mf4_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
//
vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) {
  return vle8_v_i8mf2_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
//
vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) {
  return vle8_v_i8m1_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_i8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) {
  return vle8_v_i8m2_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_i8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
//
vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) {
  return vle8_v_i8m4_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_i8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
//
vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) {
  return vle8_v_i8m8_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
//
vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) {
  return vle16_v_i16mf4_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
//
vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) {
  return vle16_v_i16mf2_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
//
vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) {
  return vle16_v_i16m1_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) {
  return vle16_v_i16m2_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_i16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
//
vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) {
  return vle16_v_i16m4_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_i16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
//
vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) {
  return vle16_v_i16m8_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_i32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
//
vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) {
  return vle32_v_i32mf2_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
//
vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) {
  return vle32_v_i32m1_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_i32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) {
  return vle32_v_i32m2_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_i32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
//
vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) {
  return vle32_v_i32m4_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_i32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
//
vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) {
  return vle32_v_i32m8_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
//
vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) {
  return vle64_v_i64m1_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) {
  return vle64_v_i64m2_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
//
vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) {
  return vle64_v_i64m4_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
//
vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) {
  return vle64_v_i64m8_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_u8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
//
vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) {
  return vle8_v_u8mf8_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
//
vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) {
  return vle8_v_u8mf4_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
//
vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) {
  return vle8_v_u8mf2_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
//
vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) {
  return vle8_v_u8m1_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_u8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) {
  return vle8_v_u8m2_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_u8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
//
vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) {
  return vle8_v_u8m4_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle8_v_u8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
//
vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) {
  return vle8_v_u8m8_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
//
vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) {
  return vle16_v_u16mf4_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
//
vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) {
  return vle16_v_u16mf2_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
//
vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) {
  return vle16_v_u16m1_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) {
  return vle16_v_u16m2_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
//
vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) {
  return vle16_v_u16m4_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_u16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
//
vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) {
  return vle16_v_u16m8_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
//
vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) {
  return vle32_v_u32mf2_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
//
vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) {
  return vle32_v_u32m1_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) {
  return vle32_v_u32m2_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
//
vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) {
  return vle32_v_u32m4_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle32_v_u32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
//
vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) {
  return vle32_v_u32m8_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
//
vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) {
  return vle64_v_u64m1_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) {
  return vle64_v_u64m2_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
//
vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) {
  return vle64_v_u64m4_m(mask, maskedoff, base, vl);
}

// CHECK-RV64-LABEL: @test_vle64_v_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
//
vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) {
976   return vle64_v_u64m8_m(mask, maskedoff, base, vl);
977 }
978 
979 // CHECK-RV64-LABEL: @test_vle32_v_f32mf2_m(
980 // CHECK-RV64-NEXT:  entry:
981 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
982 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
983 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
984 //
test_vle32_v_f32mf2_m(vbool64_t mask,vfloat32mf2_t maskedoff,const float * base,size_t vl)985 vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) {
986   return vle32_v_f32mf2_m(mask, maskedoff, base, vl);
987 }
988 
989 // CHECK-RV64-LABEL: @test_vle32_v_f32m1_m(
990 // CHECK-RV64-NEXT:  entry:
991 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
992 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
993 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
994 //
test_vle32_v_f32m1_m(vbool32_t mask,vfloat32m1_t maskedoff,const float * base,size_t vl)995 vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) {
996   return vle32_v_f32m1_m(mask, maskedoff, base, vl);
997 }
998 
999 // CHECK-RV64-LABEL: @test_vle32_v_f32m2_m(
1000 // CHECK-RV64-NEXT:  entry:
1001 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
1002 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1003 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
1004 //
test_vle32_v_f32m2_m(vbool16_t mask,vfloat32m2_t maskedoff,const float * base,size_t vl)1005 vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) {
1006   return vle32_v_f32m2_m(mask, maskedoff, base, vl);
1007 }
1008 
1009 // CHECK-RV64-LABEL: @test_vle32_v_f32m4_m(
1010 // CHECK-RV64-NEXT:  entry:
1011 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
1012 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1013 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
1014 //
test_vle32_v_f32m4_m(vbool8_t mask,vfloat32m4_t maskedoff,const float * base,size_t vl)1015 vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) {
1016   return vle32_v_f32m4_m(mask, maskedoff, base, vl);
1017 }
1018 
1019 // CHECK-RV64-LABEL: @test_vle32_v_f32m8_m(
1020 // CHECK-RV64-NEXT:  entry:
1021 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
1022 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1023 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
1024 //
test_vle32_v_f32m8_m(vbool4_t mask,vfloat32m8_t maskedoff,const float * base,size_t vl)1025 vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) {
1026   return vle32_v_f32m8_m(mask, maskedoff, base, vl);
1027 }
1028 
1029 // CHECK-RV64-LABEL: @test_vle64_v_f64m1_m(
1030 // CHECK-RV64-NEXT:  entry:
1031 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
1032 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1033 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
1034 //
test_vle64_v_f64m1_m(vbool64_t mask,vfloat64m1_t maskedoff,const double * base,size_t vl)1035 vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) {
1036   return vle64_v_f64m1_m(mask, maskedoff, base, vl);
1037 }
1038 
1039 // CHECK-RV64-LABEL: @test_vle64_v_f64m2_m(
1040 // CHECK-RV64-NEXT:  entry:
1041 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
1042 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1043 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
1044 //
test_vle64_v_f64m2_m(vbool32_t mask,vfloat64m2_t maskedoff,const double * base,size_t vl)1045 vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) {
1046   return vle64_v_f64m2_m(mask, maskedoff, base, vl);
1047 }
1048 
1049 // CHECK-RV64-LABEL: @test_vle64_v_f64m4_m(
1050 // CHECK-RV64-NEXT:  entry:
1051 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
1052 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1053 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
1054 //
test_vle64_v_f64m4_m(vbool16_t mask,vfloat64m4_t maskedoff,const double * base,size_t vl)1055 vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) {
1056   return vle64_v_f64m4_m(mask, maskedoff, base, vl);
1057 }
1058 
1059 // CHECK-RV64-LABEL: @test_vle64_v_f64m8_m(
1060 // CHECK-RV64-NEXT:  entry:
1061 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
1062 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1063 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
1064 //
test_vle64_v_f64m8_m(vbool8_t mask,vfloat64m8_t maskedoff,const double * base,size_t vl)1065 vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) {
1066   return vle64_v_f64m8_m(mask, maskedoff, base, vl);
1067 }
1068 
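// The tests below exercise the vle1 intrinsics, which load a packed mask
// (one bit per element) from a byte buffer into the vbool types. A minimal
// usage sketch combining them with the masked loads tested above follows;
// it is illustrative only, not part of the autogenerated checks, and the
// helper name load_masked_f32m1 is an assumption of this sketch.
static inline vfloat32m1_t load_masked_f32m1(const uint8_t *mask_bytes,
                                             vfloat32m1_t maskedoff,
                                             const float *src, size_t vl) {
  vbool32_t m = vle1_v_b32(mask_bytes, vl);      // load vl mask bits
  return vle32_v_f32m1_m(m, maskedoff, src, vl); // masked unit-stride load
}
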
// CHECK-RV64-LABEL: @test_vle1_v_b1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i1>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vle1.nxv64i1.i64(<vscale x 64 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i1> [[TMP1]]
//
vbool1_t test_vle1_v_b1(const uint8_t *base, size_t vl) {
  return vle1_v_b1(base, vl);
}

// CHECK-RV64-LABEL: @test_vle1_v_b2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i1>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vle1.nxv32i1.i64(<vscale x 32 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP1]]
//
vbool2_t test_vle1_v_b2(const uint8_t *base, size_t vl) {
  return vle1_v_b2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle1_v_b4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i1>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vle1.nxv16i1.i64(<vscale x 16 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
//
vbool4_t test_vle1_v_b4(const uint8_t *base, size_t vl) {
  return vle1_v_b4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle1_v_b8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vle1.nxv8i1.i64(<vscale x 8 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP1]]
//
vbool8_t test_vle1_v_b8(const uint8_t *base, size_t vl) {
  return vle1_v_b8(base, vl);
}

// CHECK-RV64-LABEL: @test_vle1_v_b16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vle1.nxv4i1.i64(<vscale x 4 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP1]]
//
vbool16_t test_vle1_v_b16(const uint8_t *base, size_t vl) {
  return vle1_v_b16(base, vl);
}

// CHECK-RV64-LABEL: @test_vle1_v_b32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vle1.nxv2i1.i64(<vscale x 2 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP1]]
//
vbool32_t test_vle1_v_b32(const uint8_t *base, size_t vl) {
  return vle1_v_b32(base, vl);
}

// CHECK-RV64-LABEL: @test_vle1_v_b64(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vle1.nxv1i1.i64(<vscale x 1 x i1>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP1]]
//
vbool64_t test_vle1_v_b64(const uint8_t *base, size_t vl) {
  return vle1_v_b64(base, vl);
}

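// The tests below cover unit-stride loads of half-precision (_Float16)
// elements into the vfloat16 types. A minimal usage sketch, illustrative
// only (the helper name load_f16m1 is an assumption, not part of the test):
static inline vfloat16m1_t load_f16m1(const _Float16 *src, size_t vl) {
  // Unmasked unit-stride load of vl half-precision elements.
  return vle16_v_f16m1(src, vl);
}
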
// CHECK-RV64-LABEL: @test_vle16_v_f16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16.i64(<vscale x 1 x half>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
//
vfloat16mf4_t test_vle16_v_f16mf4(const _Float16 *base, size_t vl) {
  return vle16_v_f16mf4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_f16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16.i64(<vscale x 2 x half>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
//
vfloat16mf2_t test_vle16_v_f16mf2(const _Float16 *base, size_t vl) {
  return vle16_v_f16mf2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_f16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16.i64(<vscale x 4 x half>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
//
vfloat16m1_t test_vle16_v_f16m1(const _Float16 *base, size_t vl) {
  return vle16_v_f16m1(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_f16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16.i64(<vscale x 8 x half>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
//
vfloat16m2_t test_vle16_v_f16m2(const _Float16 *base, size_t vl) {
  return vle16_v_f16m2(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_f16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vle.nxv16f16.i64(<vscale x 16 x half>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
//
vfloat16m4_t test_vle16_v_f16m4(const _Float16 *base, size_t vl) {
  return vle16_v_f16m4(base, vl);
}

// CHECK-RV64-LABEL: @test_vle16_v_f16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vle.nxv32f16.i64(<vscale x 32 x half>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
//
vfloat16m8_t test_vle16_v_f16m8(const _Float16 *base, size_t vl) {
  return vle16_v_f16m8(base, vl);
}
