1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
4 // RUN:   -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s \
5 // RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
6 
7 #include <riscv_vector.h>
8 
9 //
10 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8(
11 // CHECK-RV64-NEXT:  entry:
12 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
13 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
14 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
15 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
16 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
17 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
18 //
test_vle8ff_v_i8mf8(const int8_t * base,size_t * new_vl,size_t vl)19 vint8mf8_t test_vle8ff_v_i8mf8 (const int8_t *base, size_t *new_vl, size_t vl) {
20   return vle8ff_v_i8mf8(base, new_vl, vl);
21 }
22 
23 //
24 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4(
25 // CHECK-RV64-NEXT:  entry:
26 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
27 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]])
28 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 0
29 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 1
30 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
31 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP2]]
32 //
test_vle8ff_v_i8mf4(const int8_t * base,size_t * new_vl,size_t vl)33 vint8mf4_t test_vle8ff_v_i8mf4 (const int8_t *base, size_t *new_vl, size_t vl) {
34   return vle8ff_v_i8mf4(base, new_vl, vl);
35 }
36 
37 //
38 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2(
39 // CHECK-RV64-NEXT:  entry:
40 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
41 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]])
42 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 0
43 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 1
44 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
45 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP2]]
46 //
test_vle8ff_v_i8mf2(const int8_t * base,size_t * new_vl,size_t vl)47 vint8mf2_t test_vle8ff_v_i8mf2 (const int8_t *base, size_t *new_vl, size_t vl) {
48   return vle8ff_v_i8mf2(base, new_vl, vl);
49 }
50 
51 //
52 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1(
53 // CHECK-RV64-NEXT:  entry:
54 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
55 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]])
56 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 0
57 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 1
58 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
59 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP2]]
60 //
test_vle8ff_v_i8m1(const int8_t * base,size_t * new_vl,size_t vl)61 vint8m1_t test_vle8ff_v_i8m1 (const int8_t *base, size_t *new_vl, size_t vl) {
62   return vle8ff_v_i8m1(base, new_vl, vl);
63 }
64 
65 //
66 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2(
67 // CHECK-RV64-NEXT:  entry:
68 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
69 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]])
70 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 0
71 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 1
72 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
73 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP2]]
74 //
test_vle8ff_v_i8m2(const int8_t * base,size_t * new_vl,size_t vl)75 vint8m2_t test_vle8ff_v_i8m2 (const int8_t *base, size_t *new_vl, size_t vl) {
76   return vle8ff_v_i8m2(base, new_vl, vl);
77 }
78 
79 //
80 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4(
81 // CHECK-RV64-NEXT:  entry:
82 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
83 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], i64 [[VL:%.*]])
84 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 0
85 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 1
86 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
87 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP2]]
88 //
test_vle8ff_v_i8m4(const int8_t * base,size_t * new_vl,size_t vl)89 vint8m4_t test_vle8ff_v_i8m4 (const int8_t *base, size_t *new_vl, size_t vl) {
90   return vle8ff_v_i8m4(base, new_vl, vl);
91 }
92 
93 //
94 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8(
95 // CHECK-RV64-NEXT:  entry:
96 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
97 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], i64 [[VL:%.*]])
98 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 0
99 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 1
100 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
101 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP2]]
102 //
test_vle8ff_v_i8m8(const int8_t * base,size_t * new_vl,size_t vl)103 vint8m8_t test_vle8ff_v_i8m8 (const int8_t *base, size_t *new_vl, size_t vl) {
104   return vle8ff_v_i8m8(base, new_vl, vl);
105 }
106 
107 //
108 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8(
109 // CHECK-RV64-NEXT:  entry:
110 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
111 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
112 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
113 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
114 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
115 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
116 //
test_vle8ff_v_u8mf8(const uint8_t * base,size_t * new_vl,size_t vl)117 vuint8mf8_t test_vle8ff_v_u8mf8 (const uint8_t *base, size_t *new_vl, size_t vl) {
118   return vle8ff_v_u8mf8(base, new_vl, vl);
119 }
120 
121 //
122 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4(
123 // CHECK-RV64-NEXT:  entry:
124 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
125 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]])
126 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 0
127 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 1
128 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
129 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP2]]
130 //
test_vle8ff_v_u8mf4(const uint8_t * base,size_t * new_vl,size_t vl)131 vuint8mf4_t test_vle8ff_v_u8mf4 (const uint8_t *base, size_t *new_vl, size_t vl) {
132   return vle8ff_v_u8mf4(base, new_vl, vl);
133 }
134 
135 //
136 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2(
137 // CHECK-RV64-NEXT:  entry:
138 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
139 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]])
140 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 0
141 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 1
142 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
143 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP2]]
144 //
test_vle8ff_v_u8mf2(const uint8_t * base,size_t * new_vl,size_t vl)145 vuint8mf2_t test_vle8ff_v_u8mf2 (const uint8_t *base, size_t *new_vl, size_t vl) {
146   return vle8ff_v_u8mf2(base, new_vl, vl);
147 }
148 
149 //
150 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1(
151 // CHECK-RV64-NEXT:  entry:
152 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
153 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]])
154 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 0
155 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 1
156 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
157 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP2]]
158 //
test_vle8ff_v_u8m1(const uint8_t * base,size_t * new_vl,size_t vl)159 vuint8m1_t test_vle8ff_v_u8m1 (const uint8_t *base, size_t *new_vl, size_t vl) {
160   return vle8ff_v_u8m1(base, new_vl, vl);
161 }
162 
163 //
164 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2(
165 // CHECK-RV64-NEXT:  entry:
166 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
167 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]])
168 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 0
169 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 1
170 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
171 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP2]]
172 //
test_vle8ff_v_u8m2(const uint8_t * base,size_t * new_vl,size_t vl)173 vuint8m2_t test_vle8ff_v_u8m2 (const uint8_t *base, size_t *new_vl, size_t vl) {
174   return vle8ff_v_u8m2(base, new_vl, vl);
175 }
176 
177 //
178 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4(
179 // CHECK-RV64-NEXT:  entry:
180 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
181 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], i64 [[VL:%.*]])
182 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 0
183 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 1
184 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
185 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP2]]
186 //
test_vle8ff_v_u8m4(const uint8_t * base,size_t * new_vl,size_t vl)187 vuint8m4_t test_vle8ff_v_u8m4 (const uint8_t *base, size_t *new_vl, size_t vl) {
188   return vle8ff_v_u8m4(base, new_vl, vl);
189 }
190 
191 //
192 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8(
193 // CHECK-RV64-NEXT:  entry:
194 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
195 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], i64 [[VL:%.*]])
196 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 0
197 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 1
198 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
199 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP2]]
200 //
test_vle8ff_v_u8m8(const uint8_t * base,size_t * new_vl,size_t vl)201 vuint8m8_t test_vle8ff_v_u8m8 (const uint8_t *base, size_t *new_vl, size_t vl) {
202   return vle8ff_v_u8m8(base, new_vl, vl);
203 }
204 
205 //
206 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8_m(
207 // CHECK-RV64-NEXT:  entry:
208 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
209 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
210 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
211 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
212 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
213 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
214 //
test_vle8ff_v_i8mf8_m(vbool64_t mask,vint8mf8_t maskedoff,const int8_t * base,size_t * new_vl,size_t vl)215 vint8mf8_t test_vle8ff_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
216   return vle8ff_v_i8mf8_m(mask, maskedoff, base, new_vl, vl);
217 }
218 
219 //
220 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_m(
221 // CHECK-RV64-NEXT:  entry:
222 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
223 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
224 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 0
225 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 1
226 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
227 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP2]]
228 //
test_vle8ff_v_i8mf4_m(vbool32_t mask,vint8mf4_t maskedoff,const int8_t * base,size_t * new_vl,size_t vl)229 vint8mf4_t test_vle8ff_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
230   return vle8ff_v_i8mf4_m(mask, maskedoff, base, new_vl, vl);
231 }
232 
233 //
234 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_m(
235 // CHECK-RV64-NEXT:  entry:
236 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
237 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
238 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 0
239 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 1
240 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
241 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP2]]
242 //
test_vle8ff_v_i8mf2_m(vbool16_t mask,vint8mf2_t maskedoff,const int8_t * base,size_t * new_vl,size_t vl)243 vint8mf2_t test_vle8ff_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
244   return vle8ff_v_i8mf2_m(mask, maskedoff, base, new_vl, vl);
245 }
246 
247 //
248 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_m(
249 // CHECK-RV64-NEXT:  entry:
250 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
251 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
252 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 0
253 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 1
254 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
255 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP2]]
256 //
test_vle8ff_v_i8m1_m(vbool8_t mask,vint8m1_t maskedoff,const int8_t * base,size_t * new_vl,size_t vl)257 vint8m1_t test_vle8ff_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
258   return vle8ff_v_i8m1_m(mask, maskedoff, base, new_vl, vl);
259 }
260 
261 //
262 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_m(
263 // CHECK-RV64-NEXT:  entry:
264 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
265 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
266 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 0
267 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 1
268 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
269 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP2]]
270 //
test_vle8ff_v_i8m2_m(vbool4_t mask,vint8m2_t maskedoff,const int8_t * base,size_t * new_vl,size_t vl)271 vint8m2_t test_vle8ff_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
272   return vle8ff_v_i8m2_m(mask, maskedoff, base, new_vl, vl);
273 }
274 
275 //
276 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_m(
277 // CHECK-RV64-NEXT:  entry:
278 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
279 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
280 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 0
281 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 1
282 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
283 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP2]]
284 //
test_vle8ff_v_i8m4_m(vbool2_t mask,vint8m4_t maskedoff,const int8_t * base,size_t * new_vl,size_t vl)285 vint8m4_t test_vle8ff_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
286   return vle8ff_v_i8m4_m(mask, maskedoff, base, new_vl, vl);
287 }
288 
289 //
290 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_m(
291 // CHECK-RV64-NEXT:  entry:
292 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
293 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
294 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 0
295 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 1
296 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
297 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP2]]
298 //
test_vle8ff_v_i8m8_m(vbool1_t mask,vint8m8_t maskedoff,const int8_t * base,size_t * new_vl,size_t vl)299 vint8m8_t test_vle8ff_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
300   return vle8ff_v_i8m8_m(mask, maskedoff, base, new_vl, vl);
301 }
302 
303 //
304 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_m(
305 // CHECK-RV64-NEXT:  entry:
306 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
307 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
308 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
309 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
310 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
311 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
312 //
test_vle8ff_v_u8mf8_m(vbool64_t mask,vuint8mf8_t maskedoff,const uint8_t * base,size_t * new_vl,size_t vl)313 vuint8mf8_t test_vle8ff_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
314   return vle8ff_v_u8mf8_m(mask, maskedoff, base, new_vl, vl);
315 }
316 
317 //
318 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_m(
319 // CHECK-RV64-NEXT:  entry:
320 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
321 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
322 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 0
323 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 1
324 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
325 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP2]]
326 //
test_vle8ff_v_u8mf4_m(vbool32_t mask,vuint8mf4_t maskedoff,const uint8_t * base,size_t * new_vl,size_t vl)327 vuint8mf4_t test_vle8ff_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
328   return vle8ff_v_u8mf4_m(mask, maskedoff, base, new_vl, vl);
329 }
330 
331 //
332 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_m(
333 // CHECK-RV64-NEXT:  entry:
334 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
335 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
336 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 0
337 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 1
338 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
339 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP2]]
340 //
test_vle8ff_v_u8mf2_m(vbool16_t mask,vuint8mf2_t maskedoff,const uint8_t * base,size_t * new_vl,size_t vl)341 vuint8mf2_t test_vle8ff_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
342   return vle8ff_v_u8mf2_m(mask, maskedoff, base, new_vl, vl);
343 }
344 
345 //
346 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_m(
347 // CHECK-RV64-NEXT:  entry:
348 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
349 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
350 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 0
351 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 1
352 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
353 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP2]]
354 //
test_vle8ff_v_u8m1_m(vbool8_t mask,vuint8m1_t maskedoff,const uint8_t * base,size_t * new_vl,size_t vl)355 vuint8m1_t test_vle8ff_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
356   return vle8ff_v_u8m1_m(mask, maskedoff, base, new_vl, vl);
357 }
358 
359 //
360 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_m(
361 // CHECK-RV64-NEXT:  entry:
362 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
363 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
364 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 0
365 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 1
366 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
367 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP2]]
368 //
test_vle8ff_v_u8m2_m(vbool4_t mask,vuint8m2_t maskedoff,const uint8_t * base,size_t * new_vl,size_t vl)369 vuint8m2_t test_vle8ff_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
370   return vle8ff_v_u8m2_m(mask, maskedoff, base, new_vl, vl);
371 }
372 
373 //
374 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_m(
375 // CHECK-RV64-NEXT:  entry:
376 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
377 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
378 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 0
379 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 1
380 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
381 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP2]]
382 //
test_vle8ff_v_u8m4_m(vbool2_t mask,vuint8m4_t maskedoff,const uint8_t * base,size_t * new_vl,size_t vl)383 vuint8m4_t test_vle8ff_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
384   return vle8ff_v_u8m4_m(mask, maskedoff, base, new_vl, vl);
385 }
386 
387 //
388 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_m(
389 // CHECK-RV64-NEXT:  entry:
390 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
391 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
392 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 0
393 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 1
394 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
395 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP2]]
396 //
test_vle8ff_v_u8m8_m(vbool1_t mask,vuint8m8_t maskedoff,const uint8_t * base,size_t * new_vl,size_t vl)397 vuint8m8_t test_vle8ff_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
398   return vle8ff_v_u8m8_m(mask, maskedoff, base, new_vl, vl);
399 }
400 
401 //
402 // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4(
403 // CHECK-RV64-NEXT:  entry:
404 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
405 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], i64 [[VL:%.*]])
406 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 0
407 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 1
408 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
409 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP2]]
410 //
test_vle16ff_v_i16mf4(const int16_t * base,size_t * new_vl,size_t vl)411 vint16mf4_t test_vle16ff_v_i16mf4 (const int16_t *base, size_t *new_vl, size_t vl) {
412   return vle16ff_v_i16mf4(base, new_vl, vl);
413 }
414 
415 //
416 // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2(
417 // CHECK-RV64-NEXT:  entry:
418 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
419 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], i64 [[VL:%.*]])
420 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 0
421 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 1
422 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
423 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP2]]
424 //
test_vle16ff_v_i16mf2(const int16_t * base,size_t * new_vl,size_t vl)425 vint16mf2_t test_vle16ff_v_i16mf2 (const int16_t *base, size_t *new_vl, size_t vl) {
426   return vle16ff_v_i16mf2(base, new_vl, vl);
427 }
428 
429 //
430 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1(
431 // CHECK-RV64-NEXT:  entry:
432 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
433 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], i64 [[VL:%.*]])
434 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 0
435 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 1
436 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
437 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP2]]
438 //
test_vle16ff_v_i16m1(const int16_t * base,size_t * new_vl,size_t vl)439 vint16m1_t test_vle16ff_v_i16m1 (const int16_t *base, size_t *new_vl, size_t vl) {
440   return vle16ff_v_i16m1(base, new_vl, vl);
441 }
442 
443 //
444 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2(
445 // CHECK-RV64-NEXT:  entry:
446 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
447 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], i64 [[VL:%.*]])
448 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 0
449 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 1
450 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
451 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
452 //
test_vle16ff_v_i16m2(const int16_t * base,size_t * new_vl,size_t vl)453 vint16m2_t test_vle16ff_v_i16m2 (const int16_t *base, size_t *new_vl, size_t vl) {
454   return vle16ff_v_i16m2(base, new_vl, vl);
455 }
456 
457 //
458 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4(
459 // CHECK-RV64-NEXT:  entry:
460 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
461 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], i64 [[VL:%.*]])
462 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 0
463 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 1
464 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
465 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP2]]
466 //
test_vle16ff_v_i16m4(const int16_t * base,size_t * new_vl,size_t vl)467 vint16m4_t test_vle16ff_v_i16m4 (const int16_t *base, size_t *new_vl, size_t vl) {
468   return vle16ff_v_i16m4(base, new_vl, vl);
469 }
470 
471 //
472 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8(
473 // CHECK-RV64-NEXT:  entry:
474 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
475 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], i64 [[VL:%.*]])
476 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 0
477 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 1
478 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
479 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP2]]
480 //
test_vle16ff_v_i16m8(const int16_t * base,size_t * new_vl,size_t vl)481 vint16m8_t test_vle16ff_v_i16m8 (const int16_t *base, size_t *new_vl, size_t vl) {
482   return vle16ff_v_i16m8(base, new_vl, vl);
483 }
484 
485 //
486 // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4(
487 // CHECK-RV64-NEXT:  entry:
488 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
489 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], i64 [[VL:%.*]])
490 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 0
491 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 1
492 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
493 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP2]]
494 //
test_vle16ff_v_u16mf4(const uint16_t * base,size_t * new_vl,size_t vl)495 vuint16mf4_t test_vle16ff_v_u16mf4 (const uint16_t *base, size_t *new_vl, size_t vl) {
496   return vle16ff_v_u16mf4(base, new_vl, vl);
497 }
498 
499 //
500 // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2(
501 // CHECK-RV64-NEXT:  entry:
502 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
503 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], i64 [[VL:%.*]])
504 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 0
505 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 1
506 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
507 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP2]]
508 //
test_vle16ff_v_u16mf2(const uint16_t * base,size_t * new_vl,size_t vl)509 vuint16mf2_t test_vle16ff_v_u16mf2 (const uint16_t *base, size_t *new_vl, size_t vl) {
510   return vle16ff_v_u16mf2(base, new_vl, vl);
511 }
512 
513 //
514 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1(
515 // CHECK-RV64-NEXT:  entry:
516 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
517 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], i64 [[VL:%.*]])
518 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 0
519 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 1
520 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
521 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP2]]
522 //
test_vle16ff_v_u16m1(const uint16_t * base,size_t * new_vl,size_t vl)523 vuint16m1_t test_vle16ff_v_u16m1 (const uint16_t *base, size_t *new_vl, size_t vl) {
524   return vle16ff_v_u16m1(base, new_vl, vl);
525 }
526 
527 //
528 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2(
529 // CHECK-RV64-NEXT:  entry:
530 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
531 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], i64 [[VL:%.*]])
532 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 0
533 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 1
534 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
535 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
536 //
test_vle16ff_v_u16m2(const uint16_t * base,size_t * new_vl,size_t vl)537 vuint16m2_t test_vle16ff_v_u16m2 (const uint16_t *base, size_t *new_vl, size_t vl) {
538   return vle16ff_v_u16m2(base, new_vl, vl);
539 }
540 
541 //
542 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4(
543 // CHECK-RV64-NEXT:  entry:
544 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
545 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], i64 [[VL:%.*]])
546 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 0
547 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 1
548 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
549 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP2]]
550 //
test_vle16ff_v_u16m4(const uint16_t * base,size_t * new_vl,size_t vl)551 vuint16m4_t test_vle16ff_v_u16m4 (const uint16_t *base, size_t *new_vl, size_t vl) {
552   return vle16ff_v_u16m4(base, new_vl, vl);
553 }
554 
555 //
556 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8(
557 // CHECK-RV64-NEXT:  entry:
558 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
559 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], i64 [[VL:%.*]])
560 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 0
561 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 1
562 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
563 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP2]]
564 //
test_vle16ff_v_u16m8(const uint16_t * base,size_t * new_vl,size_t vl)565 vuint16m8_t test_vle16ff_v_u16m8 (const uint16_t *base, size_t *new_vl, size_t vl) {
566   return vle16ff_v_u16m8(base, new_vl, vl);
567 }
568 
569 //
570 // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_m(
571 // CHECK-RV64-NEXT:  entry:
572 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
573 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
574 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 0
575 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 1
576 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
577 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP2]]
578 //
test_vle16ff_v_i16mf4_m(vbool64_t mask,vint16mf4_t maskedoff,const int16_t * base,size_t * new_vl,size_t vl)579 vint16mf4_t test_vle16ff_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
580   return vle16ff_v_i16mf4_m(mask, maskedoff, base, new_vl, vl);
581 }
582 
583 //
584 // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_m(
585 // CHECK-RV64-NEXT:  entry:
586 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
587 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
588 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 0
589 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 1
590 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
591 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP2]]
592 //
test_vle16ff_v_i16mf2_m(vbool32_t mask,vint16mf2_t maskedoff,const int16_t * base,size_t * new_vl,size_t vl)593 vint16mf2_t test_vle16ff_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
594   return vle16ff_v_i16mf2_m(mask, maskedoff, base, new_vl, vl);
595 }
596 
597 //
598 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_m(
599 // CHECK-RV64-NEXT:  entry:
600 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
601 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
602 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 0
603 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 1
604 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
605 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP2]]
606 //
test_vle16ff_v_i16m1_m(vbool16_t mask,vint16m1_t maskedoff,const int16_t * base,size_t * new_vl,size_t vl)607 vint16m1_t test_vle16ff_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
608   return vle16ff_v_i16m1_m(mask, maskedoff, base, new_vl, vl);
609 }
610 
611 //
612 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_m(
613 // CHECK-RV64-NEXT:  entry:
614 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
615 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
616 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 0
617 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 1
618 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
619 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
620 //
test_vle16ff_v_i16m2_m(vbool8_t mask,vint16m2_t maskedoff,const int16_t * base,size_t * new_vl,size_t vl)621 vint16m2_t test_vle16ff_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
622   return vle16ff_v_i16m2_m(mask, maskedoff, base, new_vl, vl);
623 }
624 
625 //
626 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_m(
627 // CHECK-RV64-NEXT:  entry:
628 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
629 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
630 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 0
631 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 1
632 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
633 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP2]]
634 //
test_vle16ff_v_i16m4_m(vbool4_t mask,vint16m4_t maskedoff,const int16_t * base,size_t * new_vl,size_t vl)635 vint16m4_t test_vle16ff_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
636   return vle16ff_v_i16m4_m(mask, maskedoff, base, new_vl, vl);
637 }
638 
639 //
640 // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_m(
641 // CHECK-RV64-NEXT:  entry:
642 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
643 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
644 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 0
645 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 1
646 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
647 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP2]]
648 //
test_vle16ff_v_i16m8_m(vbool2_t mask,vint16m8_t maskedoff,const int16_t * base,size_t * new_vl,size_t vl)649 vint16m8_t test_vle16ff_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
650   return vle16ff_v_i16m8_m(mask, maskedoff, base, new_vl, vl);
651 }
652 
653 //
654 // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_m(
655 // CHECK-RV64-NEXT:  entry:
656 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
657 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
658 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 0
659 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 1
660 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
661 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP2]]
662 //
test_vle16ff_v_u16mf4_m(vbool64_t mask,vuint16mf4_t maskedoff,const uint16_t * base,size_t * new_vl,size_t vl)663 vuint16mf4_t test_vle16ff_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
664   return vle16ff_v_u16mf4_m(mask, maskedoff, base, new_vl, vl);
665 }
666 
667 //
668 // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_m(
669 // CHECK-RV64-NEXT:  entry:
670 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
671 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
672 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 0
673 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 1
674 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
675 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP2]]
676 //
test_vle16ff_v_u16mf2_m(vbool32_t mask,vuint16mf2_t maskedoff,const uint16_t * base,size_t * new_vl,size_t vl)677 vuint16mf2_t test_vle16ff_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
678   return vle16ff_v_u16mf2_m(mask, maskedoff, base, new_vl, vl);
679 }
680 
681 //
682 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_m(
683 // CHECK-RV64-NEXT:  entry:
684 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
685 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
686 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 0
687 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 1
688 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
689 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP2]]
690 //
test_vle16ff_v_u16m1_m(vbool16_t mask,vuint16m1_t maskedoff,const uint16_t * base,size_t * new_vl,size_t vl)691 vuint16m1_t test_vle16ff_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
692   return vle16ff_v_u16m1_m(mask, maskedoff, base, new_vl, vl);
693 }
694 
695 //
696 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_m(
697 // CHECK-RV64-NEXT:  entry:
698 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
699 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
700 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 0
701 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 1
702 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
703 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
704 //
test_vle16ff_v_u16m2_m(vbool8_t mask,vuint16m2_t maskedoff,const uint16_t * base,size_t * new_vl,size_t vl)705 vuint16m2_t test_vle16ff_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
706   return vle16ff_v_u16m2_m(mask, maskedoff, base, new_vl, vl);
707 }
708 
709 //
710 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_m(
711 // CHECK-RV64-NEXT:  entry:
712 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
713 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
714 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 0
715 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 1
716 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
717 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP2]]
718 //
test_vle16ff_v_u16m4_m(vbool4_t mask,vuint16m4_t maskedoff,const uint16_t * base,size_t * new_vl,size_t vl)719 vuint16m4_t test_vle16ff_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
720   return vle16ff_v_u16m4_m(mask, maskedoff, base, new_vl, vl);
721 }
722 
723 //
724 // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_m(
725 // CHECK-RV64-NEXT:  entry:
726 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
727 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
728 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 0
729 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 1
730 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
731 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP2]]
732 //
test_vle16ff_v_u16m8_m(vbool2_t mask,vuint16m8_t maskedoff,const uint16_t * base,size_t * new_vl,size_t vl)733 vuint16m8_t test_vle16ff_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
734   return vle16ff_v_u16m8_m(mask, maskedoff, base, new_vl, vl);
735 }
736 
737 //
738 // CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2(
739 // CHECK-RV64-NEXT:  entry:
740 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
741 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]])
742 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 0
743 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 1
744 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
745 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP2]]
746 //
test_vle32ff_v_i32mf2(const int32_t * base,size_t * new_vl,size_t vl)747 vint32mf2_t test_vle32ff_v_i32mf2 (const int32_t *base, size_t *new_vl, size_t vl) {
748   return vle32ff_v_i32mf2(base, new_vl, vl);
749 }
750 
751 //
752 // CHECK-RV64-LABEL: @test_vle32ff_v_i32m1(
753 // CHECK-RV64-NEXT:  entry:
754 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
755 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], i64 [[VL:%.*]])
756 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 0
757 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 1
758 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
759 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP2]]
760 //
test_vle32ff_v_i32m1(const int32_t * base,size_t * new_vl,size_t vl)761 vint32m1_t test_vle32ff_v_i32m1 (const int32_t *base, size_t *new_vl, size_t vl) {
762   return vle32ff_v_i32m1(base, new_vl, vl);
763 }
764 
765 //
766 // CHECK-RV64-LABEL: @test_vle32ff_v_i32m2(
767 // CHECK-RV64-NEXT:  entry:
768 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
769 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], i64 [[VL:%.*]])
770 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 0
771 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 1
772 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
773 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
774 //
test_vle32ff_v_i32m2(const int32_t * base,size_t * new_vl,size_t vl)775 vint32m2_t test_vle32ff_v_i32m2 (const int32_t *base, size_t *new_vl, size_t vl) {
776   return vle32ff_v_i32m2(base, new_vl, vl);
777 }
778 
779 //
780 // CHECK-RV64-LABEL: @test_vle32ff_v_i32m4(
781 // CHECK-RV64-NEXT:  entry:
782 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
783 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], i64 [[VL:%.*]])
784 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 0
785 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 1
786 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
787 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP2]]
788 //
test_vle32ff_v_i32m4(const int32_t * base,size_t * new_vl,size_t vl)789 vint32m4_t test_vle32ff_v_i32m4 (const int32_t *base, size_t *new_vl, size_t vl) {
790   return vle32ff_v_i32m4(base, new_vl, vl);
791 }
792 
793 //
794 // CHECK-RV64-LABEL: @test_vle32ff_v_i32m8(
795 // CHECK-RV64-NEXT:  entry:
796 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
797 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], i64 [[VL:%.*]])
798 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 0
799 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 1
800 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
801 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP2]]
802 //
test_vle32ff_v_i32m8(const int32_t * base,size_t * new_vl,size_t vl)803 vint32m8_t test_vle32ff_v_i32m8 (const int32_t *base, size_t *new_vl, size_t vl) {
804   return vle32ff_v_i32m8(base, new_vl, vl);
805 }
806 
807 //
808 // CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2(
809 // CHECK-RV64-NEXT:  entry:
810 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
811 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]])
812 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 0
813 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 1
814 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
815 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP2]]
816 //
test_vle32ff_v_u32mf2(const uint32_t * base,size_t * new_vl,size_t vl)817 vuint32mf2_t test_vle32ff_v_u32mf2 (const uint32_t *base, size_t *new_vl, size_t vl) {
818   return vle32ff_v_u32mf2(base, new_vl, vl);
819 }
820 
821 //
822 // CHECK-RV64-LABEL: @test_vle32ff_v_u32m1(
823 // CHECK-RV64-NEXT:  entry:
824 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
825 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], i64 [[VL:%.*]])
826 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 0
827 // CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 1
828 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
829 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP2]]
830 //
test_vle32ff_v_u32m1(const uint32_t * base,size_t * new_vl,size_t vl)831 vuint32m1_t test_vle32ff_v_u32m1 (const uint32_t *base, size_t *new_vl, size_t vl) {
832   return vle32ff_v_u32m1(base, new_vl, vl);
833 }
834 
//
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
vuint32m2_t test_vle32ff_v_u32m2 (const uint32_t *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_u32m2(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP2]]
//
vuint32m4_t test_vle32ff_v_u32m4 (const uint32_t *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_u32m4(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP2]]
//
vuint32m8_t test_vle32ff_v_u32m8 (const uint32_t *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_u32m8(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.nxv1f32.i64(<vscale x 1 x float>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP2]]
//
vfloat32mf2_t test_vle32ff_v_f32mf2 (const float *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_f32mf2(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x float>, i64 } @llvm.riscv.vleff.nxv2f32.i64(<vscale x 2 x float>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP2]]
//
vfloat32m1_t test_vle32ff_v_f32m1 (const float *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_f32m1(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x float>, i64 } @llvm.riscv.vleff.nxv4f32.i64(<vscale x 4 x float>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP2]]
//
vfloat32m2_t test_vle32ff_v_f32m2 (const float *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_f32m2(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x float>, i64 } @llvm.riscv.vleff.nxv8f32.i64(<vscale x 8 x float>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP2]]
//
vfloat32m4_t test_vle32ff_v_f32m4 (const float *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_f32m4(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x float>, i64 } @llvm.riscv.vleff.nxv16f32.i64(<vscale x 16 x float>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x float>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x float>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP2]]
//
vfloat32m8_t test_vle32ff_v_f32m8 (const float *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_f32m8(base, new_vl, vl);
}

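/* Usage note (not part of the autogenerated checks): the vleNNff intrinsics
 * are fault-only-first loads, so `*new_vl` is set to the number of elements
 * that were actually loaded before a fault would have been taken.  A
 * hypothetical sketch of consuming that value with the intrinsics tested in
 * this file (src, n and got are illustrative names; vsetvl_e32m1 is assumed
 * to be available from riscv_vector.h):
 *
 *   size_t got;
 *   for (size_t avl = n; avl > 0; avl -= got, src += got) {
 *     size_t vl = vsetvl_e32m1(avl);
 *     vint32m1_t v = vle32ff_v_i32m1(src, &got, vl);
 *     // process the first `got` elements of v here
 *   }
 */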
//
// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP2]]
//
vint32mf2_t test_vle32ff_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_i32mf2_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP2]]
//
vint32m1_t test_vle32ff_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_i32m1_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
vint32m2_t test_vle32ff_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_i32m2_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP2]]
//
vint32m4_t test_vle32ff_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_i32m4_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP2]]
//
vint32m8_t test_vle32ff_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_i32m8_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP2]]
//
vuint32mf2_t test_vle32ff_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_u32mf2_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP2]]
//
vuint32m1_t test_vle32ff_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_u32m1_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
vuint32m2_t test_vle32ff_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_u32m2_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP2]]
//
vuint32m4_t test_vle32ff_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_u32m4_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP2]]
//
vuint32m8_t test_vle32ff_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_u32m8_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP2]]
//
vfloat32mf2_t test_vle32ff_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_f32mf2_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x float>, i64 } @llvm.riscv.vleff.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP2]]
//
vfloat32m1_t test_vle32ff_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_f32m1_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x float>, i64 } @llvm.riscv.vleff.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP2]]
//
vfloat32m2_t test_vle32ff_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_f32m2_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x float>, i64 } @llvm.riscv.vleff.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP2]]
//
vfloat32m4_t test_vle32ff_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_f32m4_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x float>, i64 } @llvm.riscv.vleff.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x float>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x float>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP2]]
//
vfloat32m8_t test_vle32ff_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
  return vle32ff_v_f32m8_m(mask, maskedoff, base, new_vl, vl);
}

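/* The _m tests above follow the masked-intrinsic convention used throughout
 * this file (mask, maskedoff, then the regular operands) and lower to the
 * @llvm.riscv.vleff.mask.* intrinsics checked above; the tests below repeat
 * the same unmasked/masked pattern for the 64-bit element types. */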
//
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP2]]
//
vint64m1_t test_vle64ff_v_i64m1 (const int64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_i64m1(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
//
vint64m2_t test_vle64ff_v_i64m2 (const int64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_i64m2(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP2]]
//
vint64m4_t test_vle64ff_v_i64m4 (const int64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_i64m4(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP2]]
//
vint64m8_t test_vle64ff_v_i64m8 (const int64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_i64m8(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP2]]
//
vuint64m1_t test_vle64ff_v_u64m1 (const uint64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_u64m1(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
//
vuint64m2_t test_vle64ff_v_u64m2 (const uint64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_u64m2(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP2]]
//
vuint64m4_t test_vle64ff_v_u64m4 (const uint64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_u64m4(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP2]]
//
vuint64m8_t test_vle64ff_v_u64m8 (const uint64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_u64m8(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64.i64(<vscale x 1 x double>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP2]]
//
vfloat64m1_t test_vle64ff_v_f64m1 (const double *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_f64m1(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.nxv2f64.i64(<vscale x 2 x double>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP2]]
//
vfloat64m2_t test_vle64ff_v_f64m2 (const double *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_f64m2(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.nxv4f64.i64(<vscale x 4 x double>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP2]]
//
vfloat64m4_t test_vle64ff_v_f64m4 (const double *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_f64m4(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.nxv8f64.i64(<vscale x 8 x double>* [[TMP0]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP2]]
//
vfloat64m8_t test_vle64ff_v_f64m8 (const double *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_f64m8(base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP2]]
//
vint64m1_t test_vle64ff_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_i64m1_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
//
vint64m2_t test_vle64ff_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_i64m2_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP2]]
//
vint64m4_t test_vle64ff_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_i64m4_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP2]]
//
vint64m8_t test_vle64ff_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_i64m8_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP2]]
//
vuint64m1_t test_vle64ff_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_u64m1_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
//
vuint64m2_t test_vle64ff_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_u64m2_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP2]]
//
vuint64m4_t test_vle64ff_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_u64m4_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP2]]
//
vuint64m8_t test_vle64ff_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_u64m8_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP2]]
//
vfloat64m1_t test_vle64ff_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_f64m1_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP2]]
//
vfloat64m2_t test_vle64ff_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_f64m2_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP2]]
//
vfloat64m4_t test_vle64ff_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_f64m4_m(mask, maskedoff, base, new_vl, vl);
}

//
// CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 0
// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP2]]
//
vfloat64m8_t test_vle64ff_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
  return vle64ff_v_f64m8_m(mask, maskedoff, base, new_vl, vl);
}