// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
  return vsrl_vv_u8mf8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8mf8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
  return vsrl_vv_u8mf4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8mf4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
  return vsrl_vv_u8mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
  return vsrl_vv_u8m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
  return vsrl_vv_u8m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
  return vsrl_vv_u8m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
  return vsrl_vv_u8m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
  return vsrl_vv_u16mf4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16mf4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
  return vsrl_vv_u16mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
  return vsrl_vv_u16m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
  return vsrl_vv_u16m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
  return vsrl_vv_u16m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
  return vsrl_vv_u16m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
  return vsrl_vv_u32mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
  return vsrl_vv_u32m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
  return vsrl_vv_u32m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
  return vsrl_vv_u32m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
  return vsrl_vv_u32m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
  return vsrl_vv_u64m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u64m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
  return vsrl_vv_u64m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u64m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
  return vsrl_vv_u64m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u64m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
  return vsrl_vv_u64m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u64m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
  return vsrl_vv_u8mf8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8mf8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
  return vsrl_vv_u8mf4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8mf4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
  return vsrl_vv_u8mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
  return vsrl_vv_u8m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
  return vsrl_vv_u8m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
  return vsrl_vv_u8m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
  return vsrl_vv_u8m8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8m8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
  return vsrl_vv_u16mf4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16mf4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
  return vsrl_vv_u16mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
  return vsrl_vv_u16m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
  return vsrl_vv_u16m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
  return vsrl_vv_u16m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
  return vsrl_vv_u16m8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16m8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
  return vsrl_vv_u32mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
  return vsrl_vv_u32m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
  return vsrl_vv_u32m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
  return vsrl_vv_u32m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
  return vsrl_vv_u32m8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32m8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
  return vsrl_vv_u64m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u64m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
  return vsrl_vv_u64m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u64m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
  return vsrl_vv_u64m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u64m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
  return vsrl_vv_u64m8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u64m8_m(mask, maskedoff, op1, shift, vl);
}