// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

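// The tests below exercise the vnsra (narrowing shift right arithmetic)
// intrinsics: each element of the 2*SEW-wide source vector op1 is shifted
// right arithmetically and truncated to SEW bits. The _wv forms take a
// vector shift amount, the _wx forms a scalar one. Unmasked variants come
// first; the masked _m variants follow in the second half of the file.
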
//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
  return vnsra_wv_i8mf8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i8mf8(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
  return vnsra_wv_i8mf4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i8mf4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
  return vnsra_wv_i8mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i8mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
  return vnsra_wv_i8m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i8m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
  return vnsra_wv_i8m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i8m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
  return vnsra_wv_i8m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i8m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
  return vnsra_wv_i16mf4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i16mf4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
  return vnsra_wv_i16mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i16mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
  return vnsra_wv_i16m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i16m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
  return vnsra_wv_i16m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i16m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
  return vnsra_wv_i16m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i16m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
  return vnsra_wv_i32mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i32mf2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
  return vnsra_wv_i32m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i32m1(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
  return vnsra_wv_i32m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i32m2(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
  return vnsra_wv_i32m4(op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i32m4(op1, shift, vl);
}

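// Masked variants: the _m intrinsics additionally take a mask and a
// maskedoff operand; result elements whose mask bit is clear are taken
// from maskedoff.
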
//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
  return vnsra_wv_i8mf8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i8mf8_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
  return vnsra_wv_i8mf4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i8mf4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
  return vnsra_wv_i8mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i8mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
  return vnsra_wv_i8m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i8m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
  return vnsra_wv_i8m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i8m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
  return vnsra_wv_i8m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i8m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
  return vnsra_wv_i16mf4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i16mf4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
  return vnsra_wv_i16mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i16mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
  return vnsra_wv_i16m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i16m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
  return vnsra_wv_i16m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i16m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
  return vnsra_wv_i16m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i16m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
  return vnsra_wv_i32mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i32mf2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
  return vnsra_wv_i32m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i32m1_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
  return vnsra_wv_i32m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i32m2_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
  return vnsra_wv_i32m4_m(mask, maskedoff, op1, shift, vl);
}

//
// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
  return vnsra_wx_i32m4_m(mask, maskedoff, op1, shift, vl);
}