// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
6 
7 //
8 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8(
9 // CHECK-RV64-NEXT:  entry:
10 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
11 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
12 //
vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
  return vssra_vv_i8mf8(op1, shift, vl);
}
16 
17 //
18 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8(
19 // CHECK-RV64-NEXT:  entry:
20 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
21 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
22 //
vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
  return vssra_vx_i8mf8(op1, shift, vl);
}
26 
27 //
28 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4(
29 // CHECK-RV64-NEXT:  entry:
30 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
31 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
32 //
vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
  return vssra_vv_i8mf4(op1, shift, vl);
}
36 
37 //
38 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4(
39 // CHECK-RV64-NEXT:  entry:
40 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
41 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
42 //
vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
  return vssra_vx_i8mf4(op1, shift, vl);
}
46 
47 //
48 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2(
49 // CHECK-RV64-NEXT:  entry:
50 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
51 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
52 //
vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
  return vssra_vv_i8mf2(op1, shift, vl);
}
56 
57 //
58 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2(
59 // CHECK-RV64-NEXT:  entry:
60 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
61 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
62 //
vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
  return vssra_vx_i8mf2(op1, shift, vl);
}
66 
67 //
68 // CHECK-RV64-LABEL: @test_vssra_vv_i8m1(
69 // CHECK-RV64-NEXT:  entry:
70 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
71 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
72 //
vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
  return vssra_vv_i8m1(op1, shift, vl);
}
76 
77 //
78 // CHECK-RV64-LABEL: @test_vssra_vx_i8m1(
79 // CHECK-RV64-NEXT:  entry:
80 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
81 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
82 //
vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
  return vssra_vx_i8m1(op1, shift, vl);
}
86 
87 //
88 // CHECK-RV64-LABEL: @test_vssra_vv_i8m2(
89 // CHECK-RV64-NEXT:  entry:
90 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
91 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
92 //
vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
  return vssra_vv_i8m2(op1, shift, vl);
}
96 
97 //
98 // CHECK-RV64-LABEL: @test_vssra_vx_i8m2(
99 // CHECK-RV64-NEXT:  entry:
100 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
101 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
102 //
vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
  return vssra_vx_i8m2(op1, shift, vl);
}
106 
107 //
108 // CHECK-RV64-LABEL: @test_vssra_vv_i8m4(
109 // CHECK-RV64-NEXT:  entry:
110 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
111 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
112 //
vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
  return vssra_vv_i8m4(op1, shift, vl);
}
116 
117 //
118 // CHECK-RV64-LABEL: @test_vssra_vx_i8m4(
119 // CHECK-RV64-NEXT:  entry:
120 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
121 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
122 //
vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
  return vssra_vx_i8m4(op1, shift, vl);
}
126 
127 //
128 // CHECK-RV64-LABEL: @test_vssra_vv_i8m8(
129 // CHECK-RV64-NEXT:  entry:
130 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
131 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
132 //
vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
  return vssra_vv_i8m8(op1, shift, vl);
}
136 
137 //
138 // CHECK-RV64-LABEL: @test_vssra_vx_i8m8(
139 // CHECK-RV64-NEXT:  entry:
140 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
141 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
142 //
vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
  return vssra_vx_i8m8(op1, shift, vl);
}
146 
147 //
148 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4(
149 // CHECK-RV64-NEXT:  entry:
150 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
151 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
152 //
vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
                                 size_t vl) {
  return vssra_vv_i16mf4(op1, shift, vl);
}
157 
158 //
159 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4(
160 // CHECK-RV64-NEXT:  entry:
161 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
162 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
163 //
vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
  return vssra_vx_i16mf4(op1, shift, vl);
}
167 
168 //
169 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2(
170 // CHECK-RV64-NEXT:  entry:
171 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
172 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
173 //
vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
                                 size_t vl) {
  return vssra_vv_i16mf2(op1, shift, vl);
}
178 
179 //
180 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2(
181 // CHECK-RV64-NEXT:  entry:
182 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
183 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
184 //
vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
  return vssra_vx_i16mf2(op1, shift, vl);
}
188 
189 //
190 // CHECK-RV64-LABEL: @test_vssra_vv_i16m1(
191 // CHECK-RV64-NEXT:  entry:
192 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
193 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
194 //
vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
  return vssra_vv_i16m1(op1, shift, vl);
}
198 
199 //
200 // CHECK-RV64-LABEL: @test_vssra_vx_i16m1(
201 // CHECK-RV64-NEXT:  entry:
202 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
203 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
204 //
vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
  return vssra_vx_i16m1(op1, shift, vl);
}
208 
209 //
210 // CHECK-RV64-LABEL: @test_vssra_vv_i16m2(
211 // CHECK-RV64-NEXT:  entry:
212 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
213 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
214 //
vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
  return vssra_vv_i16m2(op1, shift, vl);
}
218 
219 //
220 // CHECK-RV64-LABEL: @test_vssra_vx_i16m2(
221 // CHECK-RV64-NEXT:  entry:
222 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
223 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
224 //
vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
  return vssra_vx_i16m2(op1, shift, vl);
}
228 
229 //
230 // CHECK-RV64-LABEL: @test_vssra_vv_i16m4(
231 // CHECK-RV64-NEXT:  entry:
232 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
233 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
234 //
vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
  return vssra_vv_i16m4(op1, shift, vl);
}
238 
239 //
240 // CHECK-RV64-LABEL: @test_vssra_vx_i16m4(
241 // CHECK-RV64-NEXT:  entry:
242 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
243 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
244 //
vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
  return vssra_vx_i16m4(op1, shift, vl);
}
248 
249 //
250 // CHECK-RV64-LABEL: @test_vssra_vv_i16m8(
251 // CHECK-RV64-NEXT:  entry:
252 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
253 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
254 //
vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
  return vssra_vv_i16m8(op1, shift, vl);
}
258 
259 //
260 // CHECK-RV64-LABEL: @test_vssra_vx_i16m8(
261 // CHECK-RV64-NEXT:  entry:
262 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
263 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
264 //
vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
  return vssra_vx_i16m8(op1, shift, vl);
}
268 
269 //
270 // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2(
271 // CHECK-RV64-NEXT:  entry:
272 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
273 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
274 //
vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
                                 size_t vl) {
  return vssra_vv_i32mf2(op1, shift, vl);
}
279 
280 //
281 // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2(
282 // CHECK-RV64-NEXT:  entry:
283 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
284 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
285 //
vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
  return vssra_vx_i32mf2(op1, shift, vl);
}
289 
290 //
291 // CHECK-RV64-LABEL: @test_vssra_vv_i32m1(
292 // CHECK-RV64-NEXT:  entry:
293 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
294 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
295 //
vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
  return vssra_vv_i32m1(op1, shift, vl);
}
299 
300 //
301 // CHECK-RV64-LABEL: @test_vssra_vx_i32m1(
302 // CHECK-RV64-NEXT:  entry:
303 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
304 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
305 //
vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
  return vssra_vx_i32m1(op1, shift, vl);
}
309 
310 //
311 // CHECK-RV64-LABEL: @test_vssra_vv_i32m2(
312 // CHECK-RV64-NEXT:  entry:
313 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
314 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
315 //
vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
  return vssra_vv_i32m2(op1, shift, vl);
}
319 
320 //
321 // CHECK-RV64-LABEL: @test_vssra_vx_i32m2(
322 // CHECK-RV64-NEXT:  entry:
323 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
324 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
325 //
vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
  return vssra_vx_i32m2(op1, shift, vl);
}
329 
330 //
331 // CHECK-RV64-LABEL: @test_vssra_vv_i32m4(
332 // CHECK-RV64-NEXT:  entry:
333 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
334 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
335 //
vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
  return vssra_vv_i32m4(op1, shift, vl);
}
339 
340 //
341 // CHECK-RV64-LABEL: @test_vssra_vx_i32m4(
342 // CHECK-RV64-NEXT:  entry:
343 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
344 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
345 //
vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
  return vssra_vx_i32m4(op1, shift, vl);
}
349 
350 //
351 // CHECK-RV64-LABEL: @test_vssra_vv_i32m8(
352 // CHECK-RV64-NEXT:  entry:
353 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
354 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
355 //
vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
  return vssra_vv_i32m8(op1, shift, vl);
}
359 
360 //
361 // CHECK-RV64-LABEL: @test_vssra_vx_i32m8(
362 // CHECK-RV64-NEXT:  entry:
363 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
364 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
365 //
vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
  return vssra_vx_i32m8(op1, shift, vl);
}
369 
370 //
371 // CHECK-RV64-LABEL: @test_vssra_vv_i64m1(
372 // CHECK-RV64-NEXT:  entry:
373 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
374 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
375 //
vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
  return vssra_vv_i64m1(op1, shift, vl);
}
379 
380 //
381 // CHECK-RV64-LABEL: @test_vssra_vx_i64m1(
382 // CHECK-RV64-NEXT:  entry:
383 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
384 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
385 //
vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
  return vssra_vx_i64m1(op1, shift, vl);
}
389 
390 //
391 // CHECK-RV64-LABEL: @test_vssra_vv_i64m2(
392 // CHECK-RV64-NEXT:  entry:
393 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
394 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
395 //
vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
  return vssra_vv_i64m2(op1, shift, vl);
}
399 
400 //
401 // CHECK-RV64-LABEL: @test_vssra_vx_i64m2(
402 // CHECK-RV64-NEXT:  entry:
403 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
404 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
405 //
vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
  return vssra_vx_i64m2(op1, shift, vl);
}
409 
410 //
411 // CHECK-RV64-LABEL: @test_vssra_vv_i64m4(
412 // CHECK-RV64-NEXT:  entry:
413 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
414 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
415 //
vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
  return vssra_vv_i64m4(op1, shift, vl);
}
419 
420 //
421 // CHECK-RV64-LABEL: @test_vssra_vx_i64m4(
422 // CHECK-RV64-NEXT:  entry:
423 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
424 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
425 //
vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
  return vssra_vx_i64m4(op1, shift, vl);
}
429 
430 //
431 // CHECK-RV64-LABEL: @test_vssra_vv_i64m8(
432 // CHECK-RV64-NEXT:  entry:
433 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
434 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
435 //
vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
  return vssra_vv_i64m8(op1, shift, vl);
}
439 
440 //
441 // CHECK-RV64-LABEL: @test_vssra_vx_i64m8(
442 // CHECK-RV64-NEXT:  entry:
443 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
444 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
445 //
vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
  return vssra_vx_i64m8(op1, shift, vl);
}
449 
450 //
451 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_m(
452 // CHECK-RV64-NEXT:  entry:
453 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
454 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
455 //
vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
                                 vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
  return vssra_vv_i8mf8_m(mask, maskedoff, op1, shift, vl);
}
460 
461 //
462 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m(
463 // CHECK-RV64-NEXT:  entry:
464 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
465 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
466 //
vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
                                 vint8mf8_t op1, size_t shift, size_t vl) {
  return vssra_vx_i8mf8_m(mask, maskedoff, op1, shift, vl);
}
471 
472 //
473 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m(
474 // CHECK-RV64-NEXT:  entry:
475 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
476 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
477 //
vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
                                 vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
  return vssra_vv_i8mf4_m(mask, maskedoff, op1, shift, vl);
}
482 
483 //
484 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m(
485 // CHECK-RV64-NEXT:  entry:
486 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
487 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
488 //
vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
                                 vint8mf4_t op1, size_t shift, size_t vl) {
  return vssra_vx_i8mf4_m(mask, maskedoff, op1, shift, vl);
}
493 
494 //
495 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m(
496 // CHECK-RV64-NEXT:  entry:
497 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
498 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
499 //
vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
                                 vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
  return vssra_vv_i8mf2_m(mask, maskedoff, op1, shift, vl);
}
504 
505 //
506 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m(
507 // CHECK-RV64-NEXT:  entry:
508 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
509 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
510 //
vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
                                 vint8mf2_t op1, size_t shift, size_t vl) {
  return vssra_vx_i8mf2_m(mask, maskedoff, op1, shift, vl);
}
515 
516 //
517 // CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m(
518 // CHECK-RV64-NEXT:  entry:
519 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
520 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
521 //
vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
                               vint8m1_t op1, vuint8m1_t shift, size_t vl) {
  return vssra_vv_i8m1_m(mask, maskedoff, op1, shift, vl);
}
526 
527 //
528 // CHECK-RV64-LABEL: @test_vssra_vx_i8m1_m(
529 // CHECK-RV64-NEXT:  entry:
530 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
531 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
532 //
vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
                               vint8m1_t op1, size_t shift, size_t vl) {
  return vssra_vx_i8m1_m(mask, maskedoff, op1, shift, vl);
}
537 
538 //
539 // CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m(
540 // CHECK-RV64-NEXT:  entry:
541 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
542 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
543 //
vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
                               vint8m2_t op1, vuint8m2_t shift, size_t vl) {
  return vssra_vv_i8m2_m(mask, maskedoff, op1, shift, vl);
}
548 
549 //
550 // CHECK-RV64-LABEL: @test_vssra_vx_i8m2_m(
551 // CHECK-RV64-NEXT:  entry:
552 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
553 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
554 //
vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
                               vint8m2_t op1, size_t shift, size_t vl) {
  return vssra_vx_i8m2_m(mask, maskedoff, op1, shift, vl);
}
559 
560 //
561 // CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m(
562 // CHECK-RV64-NEXT:  entry:
563 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
564 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
565 //
vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
                               vint8m4_t op1, vuint8m4_t shift, size_t vl) {
  return vssra_vv_i8m4_m(mask, maskedoff, op1, shift, vl);
}
570 
571 //
572 // CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m(
573 // CHECK-RV64-NEXT:  entry:
574 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
575 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
576 //
vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
                               vint8m4_t op1, size_t shift, size_t vl) {
  return vssra_vx_i8m4_m(mask, maskedoff, op1, shift, vl);
}
581 
582 //
583 // CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m(
584 // CHECK-RV64-NEXT:  entry:
585 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
586 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
587 //
vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
                               vint8m8_t op1, vuint8m8_t shift, size_t vl) {
  return vssra_vv_i8m8_m(mask, maskedoff, op1, shift, vl);
}
592 
593 //
594 // CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m(
595 // CHECK-RV64-NEXT:  entry:
596 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
597 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
598 //
vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
                               vint8m8_t op1, size_t shift, size_t vl) {
  return vssra_vx_i8m8_m(mask, maskedoff, op1, shift, vl);
}
603 
604 //
605 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m(
606 // CHECK-RV64-NEXT:  entry:
607 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
608 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
609 //
vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
                                   vint16mf4_t op1, vuint16mf4_t shift,
                                   size_t vl) {
  return vssra_vv_i16mf4_m(mask, maskedoff, op1, shift, vl);
}
615 
616 //
617 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m(
618 // CHECK-RV64-NEXT:  entry:
619 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
620 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
621 //
vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
                                   vint16mf4_t op1, size_t shift, size_t vl) {
  return vssra_vx_i16mf4_m(mask, maskedoff, op1, shift, vl);
}
626 
627 //
628 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m(
629 // CHECK-RV64-NEXT:  entry:
630 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
631 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
632 //
vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
                                   vint16mf2_t op1, vuint16mf2_t shift,
                                   size_t vl) {
  return vssra_vv_i16mf2_m(mask, maskedoff, op1, shift, vl);
}
638 
639 //
640 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m(
641 // CHECK-RV64-NEXT:  entry:
642 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
643 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
644 //
vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
                                   vint16mf2_t op1, size_t shift, size_t vl) {
  return vssra_vx_i16mf2_m(mask, maskedoff, op1, shift, vl);
}
649 
650 //
651 // CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m(
652 // CHECK-RV64-NEXT:  entry:
653 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
654 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
655 //
vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
                                 vint16m1_t op1, vuint16m1_t shift, size_t vl) {
  return vssra_vv_i16m1_m(mask, maskedoff, op1, shift, vl);
}
660 
661 //
662 // CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m(
663 // CHECK-RV64-NEXT:  entry:
664 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
665 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
666 //
vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
                                 vint16m1_t op1, size_t shift, size_t vl) {
  return vssra_vx_i16m1_m(mask, maskedoff, op1, shift, vl);
}
671 
672 //
673 // CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m(
674 // CHECK-RV64-NEXT:  entry:
675 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
676 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
677 //
vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
                                 vint16m2_t op1, vuint16m2_t shift, size_t vl) {
  return vssra_vv_i16m2_m(mask, maskedoff, op1, shift, vl);
}
682 
683 //
684 // CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m(
685 // CHECK-RV64-NEXT:  entry:
686 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
687 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
688 //
vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
                                 vint16m2_t op1, size_t shift, size_t vl) {
  return vssra_vx_i16m2_m(mask, maskedoff, op1, shift, vl);
}
693 
694 //
695 // CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m(
696 // CHECK-RV64-NEXT:  entry:
697 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
698 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
699 //
vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
                                 vint16m4_t op1, vuint16m4_t shift, size_t vl) {
  return vssra_vv_i16m4_m(mask, maskedoff, op1, shift, vl);
}
704 
705 //
706 // CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m(
707 // CHECK-RV64-NEXT:  entry:
708 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
709 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
710 //
vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
                                 vint16m4_t op1, size_t shift, size_t vl) {
  return vssra_vx_i16m4_m(mask, maskedoff, op1, shift, vl);
}
715 
716 //
717 // CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m(
718 // CHECK-RV64-NEXT:  entry:
719 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
720 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
721 //
vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
                                 vint16m8_t op1, vuint16m8_t shift, size_t vl) {
  return vssra_vv_i16m8_m(mask, maskedoff, op1, shift, vl);
}
726 
727 //
728 // CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m(
729 // CHECK-RV64-NEXT:  entry:
730 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
731 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
732 //
vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
                                 vint16m8_t op1, size_t shift, size_t vl) {
  return vssra_vx_i16m8_m(mask, maskedoff, op1, shift, vl);
}
737 
738 //
739 // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m(
740 // CHECK-RV64-NEXT:  entry:
741 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
742 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
743 //
vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
                                   vint32mf2_t op1, vuint32mf2_t shift,
                                   size_t vl) {
  return vssra_vv_i32mf2_m(mask, maskedoff, op1, shift, vl);
}
749 
750 //
751 // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m(
752 // CHECK-RV64-NEXT:  entry:
753 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
754 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
755 //
vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
                                   vint32mf2_t op1, size_t shift, size_t vl) {
  return vssra_vx_i32mf2_m(mask, maskedoff, op1, shift, vl);
}
760 
761 //
762 // CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m(
763 // CHECK-RV64-NEXT:  entry:
764 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
765 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
766 //
vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
                                 vint32m1_t op1, vuint32m1_t shift, size_t vl) {
  return vssra_vv_i32m1_m(mask, maskedoff, op1, shift, vl);
}
771 
772 //
773 // CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m(
774 // CHECK-RV64-NEXT:  entry:
775 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
776 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
777 //
vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
                                 vint32m1_t op1, size_t shift, size_t vl) {
  return vssra_vx_i32m1_m(mask, maskedoff, op1, shift, vl);
}
782 
783 //
784 // CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m(
785 // CHECK-RV64-NEXT:  entry:
786 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
787 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
788 //
vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
                                 vint32m2_t op1, vuint32m2_t shift, size_t vl) {
  return vssra_vv_i32m2_m(mask, maskedoff, op1, shift, vl);
}
793 
794 //
795 // CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m(
796 // CHECK-RV64-NEXT:  entry:
797 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
798 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
799 //
vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
                                 vint32m2_t op1, size_t shift, size_t vl) {
  return vssra_vx_i32m2_m(mask, maskedoff, op1, shift, vl);
}
804 
805 //
806 // CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m(
807 // CHECK-RV64-NEXT:  entry:
808 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
809 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
810 //
vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
                                 vint32m4_t op1, vuint32m4_t shift, size_t vl) {
  return vssra_vv_i32m4_m(mask, maskedoff, op1, shift, vl);
}
815 
816 //
817 // CHECK-RV64-LABEL: @test_vssra_vx_i32m4_m(
818 // CHECK-RV64-NEXT:  entry:
819 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
820 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
821 //
vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
                                 vint32m4_t op1, size_t shift, size_t vl) {
  return vssra_vx_i32m4_m(mask, maskedoff, op1, shift, vl);
}
826 
827 //
828 // CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m(
829 // CHECK-RV64-NEXT:  entry:
830 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
831 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
832 //
vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
                                 vint32m8_t op1, vuint32m8_t shift, size_t vl) {
  return vssra_vv_i32m8_m(mask, maskedoff, op1, shift, vl);
}
837 
838 //
839 // CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m(
840 // CHECK-RV64-NEXT:  entry:
841 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
842 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
843 //
vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
                                 vint32m8_t op1, size_t shift, size_t vl) {
  return vssra_vx_i32m8_m(mask, maskedoff, op1, shift, vl);
}
848 
849 //
850 // CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m(
851 // CHECK-RV64-NEXT:  entry:
852 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
853 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
854 //
vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
                                 vint64m1_t op1, vuint64m1_t shift, size_t vl) {
  return vssra_vv_i64m1_m(mask, maskedoff, op1, shift, vl);
}
859 
860 //
861 // CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m(
862 // CHECK-RV64-NEXT:  entry:
863 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
864 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
865 //
vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
                                 vint64m1_t op1, size_t shift, size_t vl) {
  return vssra_vx_i64m1_m(mask, maskedoff, op1, shift, vl);
}
870 
871 //
872 // CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m(
873 // CHECK-RV64-NEXT:  entry:
874 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
875 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
876 //
vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
                                 vint64m2_t op1, vuint64m2_t shift, size_t vl) {
  return vssra_vv_i64m2_m(mask, maskedoff, op1, shift, vl);
}
881 
882 //
883 // CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m(
884 // CHECK-RV64-NEXT:  entry:
885 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
886 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
887 //
vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
                                 vint64m2_t op1, size_t shift, size_t vl) {
  return vssra_vx_i64m2_m(mask, maskedoff, op1, shift, vl);
}
892 
893 //
894 // CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m(
895 // CHECK-RV64-NEXT:  entry:
896 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
897 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
898 //
vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
                                 vint64m4_t op1, vuint64m4_t shift, size_t vl) {
  return vssra_vv_i64m4_m(mask, maskedoff, op1, shift, vl);
}
903 
904 //
905 // CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m(
906 // CHECK-RV64-NEXT:  entry:
907 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
908 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
909 //
vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
                                 vint64m4_t op1, size_t shift, size_t vl) {
  return vssra_vx_i64m4_m(mask, maskedoff, op1, shift, vl);
}
914 
915 //
916 // CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m(
917 // CHECK-RV64-NEXT:  entry:
918 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
919 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
920 //
vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
                                 vint64m8_t op1, vuint64m8_t shift, size_t vl) {
  return vssra_vv_i64m8_m(mask, maskedoff, op1, shift, vl);
}
925 
926 //
927 // CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m(
928 // CHECK-RV64-NEXT:  entry:
929 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
930 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
931 //
vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
                                 vint64m8_t op1, size_t shift, size_t vl) {
  return vssra_vx_i64m8_m(mask, maskedoff, op1, shift, vl);
}