// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

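// The tests below cover the vssrl (vector scaling shift right logical)
// intrinsics for every unsigned element width (u8 through u64) and LMUL, in
// both the vector-vector (vv) and vector-scalar (vx) forms; the masked (_m)
// variants follow the unmasked ones.
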
//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
  return vssrl_vv_u8mf8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u8mf8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
  return vssrl_vv_u8mf4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u8mf4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
  return vssrl_vv_u8mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u8mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
  return vssrl_vv_u8m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u8m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
  return vssrl_vv_u8m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u8m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
  return vssrl_vv_u8m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u8m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
  return vssrl_vv_u8m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u8m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
                                  size_t vl) {
  return vssrl_vv_u16mf4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u16mf4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
                                  size_t vl) {
  return vssrl_vv_u16mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u16mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
  return vssrl_vv_u16m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u16m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
  return vssrl_vv_u16m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u16m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
  return vssrl_vv_u16m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u16m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
  return vssrl_vv_u16m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u16m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
                                  size_t vl) {
  return vssrl_vv_u32mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u32mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
  return vssrl_vv_u32m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u32m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
  return vssrl_vv_u32m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u32m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
  return vssrl_vv_u32m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u32m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
  return vssrl_vv_u32m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u32m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
  return vssrl_vv_u64m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u64m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
  return vssrl_vv_u64m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u64m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
  return vssrl_vv_u64m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u64m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
  return vssrl_vv_u64m8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u64m8(op1, shift, vl);
}

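// Masked (_m) variants: each test additionally passes a mask and a maskedoff
// operand and is expected to lower to the corresponding
// @llvm.riscv.vssrl.mask.* intrinsic.
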
//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
                                  vuint8mf8_t op1, vuint8mf8_t shift,
                                  size_t vl) {
  return vssrl_vv_u8mf8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
                                  vuint8mf8_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u8mf8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
                                  vuint8mf4_t op1, vuint8mf4_t shift,
                                  size_t vl) {
  return vssrl_vv_u8mf4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
                                  vuint8mf4_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u8mf4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
                                  vuint8mf2_t op1, vuint8mf2_t shift,
                                  size_t vl) {
  return vssrl_vv_u8mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
                                  vuint8mf2_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u8mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
                                vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
  return vssrl_vv_u8m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
                                vuint8m1_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u8m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
                                vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
  return vssrl_vv_u8m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
                                vuint8m2_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u8m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
                                vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
  return vssrl_vv_u8m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
                                vuint8m4_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u8m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
                                vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
  return vssrl_vv_u8m8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
                                vuint8m8_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u8m8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
                                    vuint16mf4_t op1, vuint16mf4_t shift,
                                    size_t vl) {
  return vssrl_vv_u16mf4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
                                    vuint16mf4_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u16mf4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
                                    vuint16mf2_t op1, vuint16mf2_t shift,
                                    size_t vl) {
  return vssrl_vv_u16mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
                                    vuint16mf2_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u16mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
                                  vuint16m1_t op1, vuint16m1_t shift,
                                  size_t vl) {
  return vssrl_vv_u16m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
                                  vuint16m1_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u16m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
                                  vuint16m2_t op1, vuint16m2_t shift,
                                  size_t vl) {
  return vssrl_vv_u16m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
                                  vuint16m2_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u16m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
                                  vuint16m4_t op1, vuint16m4_t shift,
                                  size_t vl) {
  return vssrl_vv_u16m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
                                  vuint16m4_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u16m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
                                  vuint16m8_t op1, vuint16m8_t shift,
                                  size_t vl) {
  return vssrl_vv_u16m8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
                                  vuint16m8_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u16m8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
                                    vuint32mf2_t op1, vuint32mf2_t shift,
                                    size_t vl) {
  return vssrl_vv_u32mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
                                    vuint32mf2_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u32mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
                                  vuint32m1_t op1, vuint32m1_t shift,
                                  size_t vl) {
  return vssrl_vv_u32m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
                                  vuint32m1_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u32m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
                                  vuint32m2_t op1, vuint32m2_t shift,
                                  size_t vl) {
  return vssrl_vv_u32m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
                                  vuint32m2_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u32m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
                                  vuint32m4_t op1, vuint32m4_t shift,
                                  size_t vl) {
  return vssrl_vv_u32m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
                                  vuint32m4_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u32m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
                                  vuint32m8_t op1, vuint32m8_t shift,
                                  size_t vl) {
  return vssrl_vv_u32m8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
                                  vuint32m8_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u32m8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
                                  vuint64m1_t op1, vuint64m1_t shift,
                                  size_t vl) {
  return vssrl_vv_u64m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
                                  vuint64m1_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u64m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
                                  vuint64m2_t op1, vuint64m2_t shift,
                                  size_t vl) {
  return vssrl_vv_u64m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
                                  vuint64m2_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u64m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
                                  vuint64m4_t op1, vuint64m4_t shift,
                                  size_t vl) {
  return vssrl_vv_u64m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
                                  vuint64m4_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u64m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                                  vuint64m8_t op1, vuint64m8_t shift,
                                  size_t vl) {
  return vssrl_vv_u64m8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                                  vuint64m8_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u64m8_m(mask, maskedoff, op1, shift, vl);
}